# ---- python3_cookbook/chapter08/demo02.py (repo: gzgdouru/python_study) ----
'''
Custom string formatting
'''
_formats = {
'ymd': '{d.year}-{d.month}-{d.day}',
'mdy': '{d.month}/{d.day}/{d.year}',
'dmy': '{d.day}/{d.month}/{d.year}'
}
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
def __format__(self, format_spec):
if format_spec == '':
format_spec = "ymd"
fmt = _formats[format_spec]
return fmt.format(d=self)
if __name__ == "__main__":
d = Date(2019, 1, 17)
print(format(d))
print(format(d, 'dmy'))
print("this date is {:ymd}".format(d))
print("this date is {:dmy}".format(d))
# ---- tests/unit/config/test_ini.py (repo: pypa/virtualenv, MIT) ----
from __future__ import annotations
import sys
from textwrap import dedent
import pytest
from virtualenv.info import IS_PYPY, IS_WIN, fs_supports_symlink
from virtualenv.run import session_via_cli
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink is not supported")
@pytest.mark.xfail(IS_PYPY and IS_WIN and sys.version_info[0:2] == (3, 9), reason="symlink is not supported")
def test_ini_can_be_overwritten_by_flag(tmp_path, monkeypatch):
custom_ini = tmp_path / "conf.ini"
custom_ini.write_text(
dedent(
"""
[virtualenv]
copies = True
""",
),
encoding="utf-8",
)
monkeypatch.setenv("VIRTUALENV_CONFIG_FILE", str(custom_ini))
result = session_via_cli(["venv", "--symlinks"])
symlinks = result.creator.symlinks
assert symlinks is True
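    # Illustrative companion check (an assumption, not part of the original
    # test): without the overriding --symlinks flag the ini value should win,
    # i.e. session_via_cli(["venv"]).creator.symlinks would be False here.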
# ---- pyDemMDspSSFdWsh4_2.py (repo: daniel-reich/ubiquitous-fiesta) ----
def digital_decipher(eMessage, key):
keyPos = 0
key = str(key)
decodeMessage = ''
for digit in eMessage:
decodeMessage += chr(int(digit) - int(key[keyPos])+96)
keyPos += 1
if (keyPos >= len(key)):
keyPos = 0
return decodeMessage
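if __name__ == '__main__':
    # Illustrative usage (added; the sample values are assumptions): each number
    # is ord(letter) - 96 shifted by the cycling key digits, so this prints 'hello'.
    print(digital_decipher([9, 14, 15, 21, 16], 1939))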
# ---- addons/product_size/__terp__.py (repo: sgeerish/sirr_production) ----
# The stored content is only this path (apparently a checked-in symlink target):
/home/openerp/production/extra-addons/product_size/__terp__.py
# ---- 159.py (repo: akauntotesuto888/Leetcode-Lintcode-Python) ----
# Time: O(n)
# Space: O(1)
class Solution:
def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:
d = {}
count = 0
start, end = 0, 0
result = 0
while end < len(s):
c = s[end]
d[c] = d.get(c, 0) + 1
if d[c] == 1:
count += 1
end += 1
while count > 2 and start < len(s):
curr = s[start]
if curr in d:
d[curr] -= 1
if d[curr] == 0:
count -= 1
start += 1
result = max(result, end-start)
        return result
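if __name__ == '__main__':
    # Illustrative usage (added): the longest substring of "eceba" with at most
    # two distinct characters is "ece", so this prints 3.
    print(Solution().lengthOfLongestSubstringTwoDistinct("eceba"))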
# ---- backend/silent_lake_29099/wsgi.py (repo: crowdbotics-apps/silent-lake-29099) ----
"""
WSGI config for silent_lake_29099 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'silent_lake_29099.settings')
application = get_wsgi_application()
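# Typical usage (illustrative addition, not part of the original file): a WSGI
# server imports this module's "application", e.g. `gunicorn silent_lake_29099.wsgi`.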
# ---- dev/prepare_jvm_release.py (repo: vishalbelsare/xgboost, Apache-2.0) ----
import os
import sys
import errno
import subprocess
import glob
import shutil
from contextlib import contextmanager
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
def cp(source, target):
source = normpath(source)
target = normpath(target)
print("cp {0} {1}".format(source, target))
shutil.copy(source, target)
def maybe_makedirs(path):
path = normpath(path)
print("mkdir -p " + path)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@contextmanager
def cd(path):
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
def run(command, **kwargs):
print(command)
subprocess.check_call(command, shell=True, **kwargs)
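# Illustrative usage of the two helpers above (added):
#   with cd("jvm-packages"):   # temporarily switch into the directory
#       run("mvn --version")   # shell out; raises on a non-zero exit code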
def main():
with cd("jvm-packages/"):
print("====copying pure-Python tracker====")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
cp("../python-package/xgboost/tracker.py", f"{xgboost4j}/src/main/resources")
print("====copying resources for testing====")
with cd("../demo/CLI/regression"):
run(f"{sys.executable} mapfeat.py")
run(f"{sys.executable} mknfold.py machine.txt 1")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
xgboost4j_spark = "xgboost4j-spark-gpu" if use_cuda else "xgboost4j-spark"
maybe_makedirs(f"{xgboost4j}/src/test/resources")
maybe_makedirs(f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/data/agaricus.*"):
cp(file, f"{xgboost4j}/src/test/resources")
cp(file, f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/CLI/regression/machine.txt.t*"):
cp(file, f"{xgboost4j_spark}/src/test/resources")
print("====Creating directories to hold native binaries====")
for os, arch in [("linux", "x86_64"), ("windows", "x86_64"), ("macos", "x86_64")]:
output_dir = f"xgboost4j/src/main/resources/lib/{os}/{arch}"
maybe_makedirs(output_dir)
for os, arch in [("linux", "x86_64")]:
output_dir = f"xgboost4j-gpu/src/main/resources/lib/{os}/{arch}"
maybe_makedirs(output_dir)
print("====Next Steps====")
print("1. Obtain Linux and Windows binaries from the CI server")
print("2. Put them in xgboost4j(-gpu)/src/main/resources/lib/[os]/[arch]")
print("3. Now on a Mac machine, run:")
print(" GPG_TTY=$(tty) mvn deploy -Prelease -DskipTests")
if __name__ == "__main__":
main()
# ---- game/tools/build_tools/make.py (repo: ToontownServerArchive/Cog-Invasion-Online-Alpha) ----
'''
Use this script to invoke Nirai builder and compile the game.
This process consists of three steps:
1. Pack models into a models.mf.
2. Compile the CXX stub with NiraiCompiler to generate coginvasion.exe (or launcher.exe).
3. Generate coginvasion.bin (or launcher.bin), which contains the Python modules.
'''
import argparse
import sys
import os
from niraitools import *
parser = argparse.ArgumentParser()
parser.add_argument('--compile-cxx', '-c', action='store_true',
help='Compile the CXX codes and generate coginvasion.exe into built.')
parser.add_argument('--make-nri', '-n', action='store_true',
help='Generate coginvasion.nri.')
parser.add_argument('--is-launcher', '-l', action='store_true',
help='Are we compiling the launcher?')
parser.add_argument('--models', '-m', action='store_true',
help='Pack models.mf.')
args = parser.parse_args()
def niraicall_obfuscate(code):
# We'll obfuscate if len(code) % 4 == 0
# This way we make sure both obfuscated and non-obfuscated code work.
if len(code) % 4:
return False, None
# There are several ways to obfuscate it
# For this example, we'll invert the string
return True, code[::-1]
niraimarshal.niraicall_obfuscate = niraicall_obfuscate
class CIOPackager(NiraiPackager):
HEADER = 'COGINVASIONONLINE'
BASEDIR = '.'
def __init__(self, outfile):
if args.is_launcher:
self.HEADER = 'COGINVASIONLAUNCHER'
NiraiPackager.__init__(self, outfile)
self.__manglebase = self.get_mangle_base(self.BASEDIR)
self.add_panda3d_dirs()
self.add_default_lib()
self.add_directory(self.BASEDIR, mangler=self.__mangler)
def __mangler(self, name):
# N.B. Mangler can be used to strip certain files from the build.
# The file is not included if it returns an empty string.
return name[self.__manglebase:].strip('.')
def generate_niraidata(self):
print 'Generating niraidata'
config = self.get_file_contents('tools/build_tools/config.prc', True)
niraidata = 'CONFIG = %r' % config
self.add_module('niraidata', niraidata, compile=True)
def process_modules(self):
'''
This method is called when it's time to write the output.
        For the output archive, we use an encrypted datagram.
        The datagram is read by the CXX stub, which populates the Python frozen-module array.
Datagram format:
uint32 numModules
for each module:
string name
int32 size *
data(abs(size))
* Negative size means the file was an __init__
'''
dg = Datagram()
dg.addUint32(len(self.modules))
for moduleName in self.modules:
data, size = self.modules[moduleName]
dg.addString(moduleName)
dg.addInt32(size)
dg.appendData(data)
data = dg.getMessage()
iv = '\0' * 16
if args.is_launcher:
key = 'mmkfcaaph_cil_bm'
else:
key = 'mmkfcaaph_cio_bm'
return aes.encrypt(data, key, iv)
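    # Reader-side sketch (added, illustrative only; the method names assume
    # Panda3D's DatagramIterator API):
    #   num = dgi.getUint32()
    #   for _ in range(num):
    #       name = dgi.getString()
    #       size = dgi.getInt32()
    #       data = dgi.extractBytes(abs(size))  # size < 0 marks an __init__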
if args.compile_cxx and not args.is_launcher:
compiler = NiraiCompiler('coginvasion.exe')
compiler.add_nirai_files()
compiler.add_source('tools/build_tools/coginvasion.cxx')
compiler.run()
elif args.is_launcher and args.compile_cxx:
compiler = NiraiCompiler('launcher.exe')
compiler.add_nirai_files()
compiler.add_source('tools/build_tools/launcher.cxx')
compiler.run()
if args.make_nri and not args.is_launcher:
pkg = CIOPackager('built/coginvasion.bin')
pkg.add_file('lib/coginvasion/base/CIStartGlobal.py')
pkg.add_directory('lib\\coginvasion')
pkg.generate_niraidata()
pkg.write_out()
elif args.is_launcher and args.make_nri:
pkg = CIOPackager('built/launcher.bin')
pkg.add_file('lib/launcher.py')
pkg.add_file('../Panda3D-CI/python/DLLs/_tkinter.pyd')
pkg.generate_niraidata()
pkg.write_out()
if args.models:
os.chdir('..')
cmd = 'multify -cf build/built/models.mf models'
p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=True)
v = p.wait()
if v != 0:
print 'The following command returned non-zero value (%d): %s' % (v, cmd[:100] + '...')
sys.exit(1)
# ---- 开班笔记/python网络爬虫部分/day02/lxmlTest.py (repo: jiyabing/learning) ----
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 17:20:25 2018
@author: jyb
"""
from lxml import etree
lxmlStr = '''
<bookstore>
<book>
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book>
<title lang="zh">hello world</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
</bookstore>
'''
# root node
root = etree.fromstring(lxmlStr)
print(root)
elements = root.xpath('//book/title')
print(elements[0].text)
print(elements[0].attrib)
attrs = root.xpath('//@lang')
print(attrs)
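# Illustrative extension (added): XPath predicates can filter on attributes.
print(root.xpath('//title[@lang="zh"]/text()'))  # ['hello world']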
# ---- 1404.py (repo: dibery/leetcode) ----
class Solution:
def numSteps(self, s: str) -> int:
s, ans = int(s, 2), 0
while s > 1:
ans += 1
            s = s + 1 if s % 2 else s // 2
return ans
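if __name__ == '__main__':
    # Illustrative usage (added): "1101" (13) needs 6 steps to reach 1.
    print(Solution().numSteps("1101"))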
# ---- Day 28/UtopianTree.py (repo: skdonepudi/100DaysOfCode, MIT) ----
'''
The Utopian Tree goes through 2 cycles of growth every year. Each spring, it doubles in height. Each summer, its height increases by 1 meter.
A Utopian Tree sapling with a height of 1 meter is planted at the onset of spring. How tall will the tree be after n growth cycles?
For example, if the number of growth cycles is 5, the calculations are as follows:
Period Height
0 1
1 2
2 3
3 6
4 7
5 14
Function Description
Complete the utopianTree function in the editor below.
utopianTree has the following parameter(s):
int n: the number of growth cycles to simulate
Returns
int: the height of the tree after the given number of cycles
Input Format
The first line contains an integer, , the number of test cases.
subsequent lines each contain an integer, , the number of cycles for that test case.
Sample Input
3
0
1
4
Sample Output
1
2
7
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
if n < 3:
return n + 1
if n % 2 == 0:
return (utopianTree(n - 2) * 2) + 1
else:
return (utopianTree(n - 2) + 1) * 2
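# Illustrative check (added): utopianTree(5) == 14, matching the docstring table.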
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = utopianTree(n)
fptr.write(str(result) + '\n')
fptr.close()
# ---- 最长回文串.py (repo: Luckyaxah/leetcode-python) ----
class Solution:
def longestPalindrome(self, s: str) -> int:
d = {}
for i in s:
if not i in d:
d[i] = 1
else:
d[i] += 1
ret = 0
m = 0
for i in d:
if d[i] % 2 ==0:
ret += d[i]
else:
ret += d[i]-1
if ret < len(s):
ret += 1
return ret
if __name__ == "__main__":
a = Solution()
    print(a.longestPalindrome("civilwartestingwhee"))
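    # Note (added): the call above prints 9 -- eight letters pair up and one
    # odd-count letter supplies the middle character.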
# ---- great_year_and_same_woman/important_part/group/want_old_place.py (repo: JingkaiTang/github-play) ----
#! /usr/bin/env python
def great_work(str_arg):
life(str_arg)
print('able_person')
def life(str_arg):
print(str_arg)
if __name__ == '__main__':
great_work('life_and_child')
# ---- CNN_GPU.py (repo: cp4011/Neural-Network) ----
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
# torch.manual_seed(1)
EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, transform=torchvision.transforms.ToTensor(), download=DOWNLOAD_MNIST,)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# !!!!!!!! Change in here !!!!!!!!! #
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000].cuda()/255. # Tensor on GPU
test_y = test_data.test_labels[:2000].cuda()
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2,),
nn.ReLU(), nn.MaxPool2d(kernel_size=2),)
self.conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2),)
self.out = nn.Linear(32 * 7 * 7, 10)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1)
output = self.out(x)
return output
cnn = CNN()
# !!!!!!!! Change in here !!!!!!!!! #
cnn.cuda() # Moves all model parameters and buffers to the GPU.
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()
for epoch in range(EPOCH):
for step, (x, y) in enumerate(train_loader):
# !!!!!!!! Change in here !!!!!!!!! #
b_x = x.cuda() # Tensor on GPU
b_y = y.cuda() # Tensor on GPU
output = cnn(b_x)
loss = loss_func(output, b_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 50 == 0:
test_output = cnn(test_x)
# !!!!!!!! Change in here !!!!!!!!! #
pred_y = torch.max(test_output, 1)[1].cuda().data # move the computation in GPU
accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
test_output = cnn(test_x[:10])
# !!!!!!!! Change in here !!!!!!!!! #
pred_y = torch.max(test_output, 1)[1].cuda().data # move the computation in GPU
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
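# Illustrative follow-up (added; the file name is an assumption): persist the
# trained weights with torch.save(cnn.state_dict(), 'cnn_mnist.pt') and restore
# them later via cnn.load_state_dict(torch.load('cnn_mnist.pt')).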
# ---- social/__init__.py (repo: florelui001/python-social-auth, BSD-3-Clause) ----
"""
python-social-auth application; allows OpenID or OAuth user
registration/authentication by just adding a few configuration settings.
"""
version = (0, 1, 22)
extra = '-dev'
__version__ = '.'.join(map(str, version)) + extra
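# Illustrative check (added): with version = (0, 1, 22) and extra = '-dev',
# __version__ evaluates to '0.1.22-dev'.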
# ---- ckstyle/command/args.py (repo: ljspace/CSSCheckStyle, BSD-3-Clause) ----
class CommandArgs():
def __init__(self):
self.operation = None
self.errorLevel = 2
self.recursive = False
self.printFlag = False
self.extension = '.ckstyle.txt'
self.include = 'all'
self.exclude = 'none'
self.standard = ''
self.exportJson = False
self.ignoreRuleSets = ['@unit-test-expecteds']
self.fixedExtension = '.fixed.css'
self.fixToSingleLine = False
self.compressConfig = CompressArgs()
self.safeMode = False
self.noBak = False
# current browser
self._curBrowser = None
def __str__(self):
return 'errorLevel: %s\n recursive: %s\n printFlag: %s\n extension: %s\n include: %s\n exclude: %s' % (self.errorLevel, self.recursive, self.printFlag, self.extension, self.include, self.exclude)
class CompressArgs():
def __init__(self):
self.extension = '.min.css'
self.combineFile = True
self.browsers = None
self.noBak = False
def __str__(self):
        return 'extension: %s, combineFile: %s, browsers: %s' % (self.extension, self.combineFile, self.browsers)
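if __name__ == '__main__':
    # Illustrative usage (added): print the default option objects.
    print(CommandArgs())
    print(CommandArgs().compressConfig)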
# ---- pandas/tests/resample/test_resampler_grouper.py (repo: davidycliao/bisCrawler, MIT) ----
from textwrap import dedent
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import async_mark
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
test_frame = DataFrame(
{"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
index=date_range("1/1/2000", freq="s", periods=40),
)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_ipython6_warning(ip):
from IPython.core.completer import provisionalcompleter
code = dedent(
"""\
import pandas._testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
"""
)
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning;
# appears resolved 2021-02-02
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f)
result = df.set_index("date").groupby("id").resample("D").asfreq()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"date": date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f)
result = df.groupby("group").resample("1D").ffill()
tm.assert_frame_equal(result, expected)
def test_getitem():
g = test_frame.groupby("A")
expected = g.B.apply(lambda x: x.resample("2s").mean())
result = g.resample("2s").B.mean()
tm.assert_series_equal(result, expected)
result = g.B.resample("2s").mean()
tm.assert_series_equal(result, expected)
result = g.resample("2s").mean().B
tm.assert_series_equal(result, expected)
def test_getitem_multiple():
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
df = DataFrame(data, index=date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
expected = Series(
[1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
names=["id", None],
),
name="buyer",
)
tm.assert_series_equal(result, expected)
result = r["buyer"].count()
tm.assert_series_equal(result, expected)
def test_groupby_resample_on_api_with_getitem():
# GH 17813
df = DataFrame(
{"id": list("aabbb"), "date": date_range("1-1-2016", periods=5), "data": 1}
)
exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
result = df.groupby("id").resample("2D", on="date")["data"].sum()
tm.assert_series_equal(result, exp)
def test_groupby_with_origin():
# GH 31809
freq = "1399min" # prime number that is smaller than 24h
start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
middle = "1/15/2000 00:00:00"
rng = date_range(start, end, freq="1231min") # prime number
ts = Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed origin does not work
# when dealing with unusual frequencies
simple_grouper = pd.Grouper(freq=freq)
count_ts = ts.groupby(simple_grouper).agg("count")
count_ts = count_ts[middle:end]
count_ts2 = ts2.groupby(simple_grouper).agg("count")
with pytest.raises(AssertionError, match="Index are different"):
tm.assert_index_equal(count_ts.index, count_ts2.index)
# test origin on 1970-01-01 00:00:00
origin = Timestamp(0)
adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
adjusted_count_ts = adjusted_count_ts[middle:end]
adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
# test origin on 2049-10-18 20:00:00
origin_future = Timestamp(0) + pd.Timedelta("1399min") * 30_000
adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
adjusted2_count_ts = adjusted2_count_ts[middle:end]
adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)
# both grouper use an adjusted timestamp that is a multiple of 1399 min
# they should be equals even if the adjusted_timestamp is in the future
tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
def test_nearest():
# GH 17496
# Resample nearest
index = date_range("1/1/2000", periods=3, freq="T")
result = Series(range(3), index=index).resample("20s").nearest()
expected = Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
[
"2000-01-01 00:00:00",
"2000-01-01 00:00:20",
"2000-01-01 00:00:40",
"2000-01-01 00:01:00",
"2000-01-01 00:01:20",
"2000-01-01 00:01:40",
"2000-01-01 00:02:00",
],
dtype="datetime64[ns]",
freq="20S",
),
)
tm.assert_series_equal(result, expected)
def test_methods():
g = test_frame.groupby("A")
r = g.resample("2s")
for f in ["first", "last", "median", "sem", "sum", "mean", "min", "max"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
for f in ["size"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["count"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
# series only
for f in ["nunique"]:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["nearest", "backfill", "ffill", "asfreq"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample("2s").ohlc())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1))
tm.assert_frame_equal(result, expected)
def test_apply():
g = test_frame.groupby("A")
r = g.resample("2s")
# reduction
expected = g.resample("2s").sum()
def f(x):
return x.resample("2s").sum()
result = r.apply(f)
tm.assert_frame_equal(result, expected)
def f(x):
return x.resample("2s").apply(lambda y: y.sum())
result = g.apply(f)
# y.sum() results in int64 instead of int32 on 32-bit architectures
expected = expected.astype("int64")
tm.assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
# GH 15169
index = date_range("1-1-2015", "12-31-15", freq="D")
df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=["a", "b"])
return s
expected = df.groupby(pd.Grouper(freq="M")).apply(f)
result = df.resample("M").apply(f)
tm.assert_frame_equal(result, expected)
# A case for series
expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
result = df["col1"].resample("M").apply(f)
tm.assert_series_equal(result, expected)
def test_apply_columns_multilevel():
# GH 16231
cols = pd.MultiIndex.from_tuples([("A", "a", "", "one"), ("B", "b", "i", "two")])
ind = date_range(start="2017-01-01", freq="15Min", periods=8)
df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)
agg_dict = {col: (np.sum if col[3] == "one" else np.mean) for col in df.columns}
result = df.resample("H").apply(lambda x: agg_dict[x.name](x))
expected = DataFrame(
2 * [[0, 0.0]],
index=date_range(start="2017-01-01", freq="1H", periods=2),
columns=pd.MultiIndex.from_tuples(
[("A", "a", "", "one"), ("B", "b", "i", "two")]
),
)
tm.assert_frame_equal(result, expected)
def test_resample_groupby_with_label():
# GH 13235
index = date_range("2000-01-01", freq="2D", periods=5)
df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]})
result = df.groupby("col0").resample("1W", label="left").sum()
mi = [
np.array([0, 0, 1, 2]),
pd.to_datetime(
np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
),
]
mindex = pd.MultiIndex.from_arrays(mi, names=["col0", None])
expected = DataFrame(
data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=mindex
)
tm.assert_frame_equal(result, expected)
def test_consistency_with_window():
# consistent return values with window
df = test_frame
expected = pd.Int64Index([1, 2, 3], name="A")
result = df.groupby("A").resample("2s").mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby("A").rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns():
# GH 14233
df = DataFrame(
np.random.randn(20, 3),
columns=list("aaa"),
index=date_range("2012-01-01", periods=20, freq="s"),
)
df2 = df.copy()
df2.columns = ["a", "b", "c"]
expected = df2.resample("5s").median()
result = df.resample("5s").median()
expected.columns = result.columns
tm.assert_frame_equal(result, expected)
def test_apply_to_one_column_of_df():
# GH: 36951
df = DataFrame(
{"col": range(10), "col1": range(10, 20)},
index=date_range("2012-01-01", periods=10, freq="20min"),
)
# access "col" via getattr -> make sure we handle AttributeError
result = df.resample("H").apply(lambda group: group.col.sum())
expected = Series(
[3, 12, 21, 9], index=date_range("2012-01-01", periods=4, freq="H")
)
tm.assert_series_equal(result, expected)
    # access "col" via __getitem__ -> make sure we handle KeyError
result = df.resample("H").apply(lambda group: group["col"].sum())
tm.assert_series_equal(result, expected)
def test_resample_groupby_agg():
# GH: 33548
df = DataFrame(
{
"cat": [
"cat_1",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
],
"num": [5, 20, 22, 3, 4, 30, 10, 50],
"date": [
"2019-2-1",
"2018-02-03",
"2020-3-11",
"2019-2-2",
"2019-2-2",
"2018-12-4",
"2020-3-11",
"2020-12-12",
],
}
)
df["date"] = pd.to_datetime(df["date"])
resampled = df.groupby("cat").resample("Y", on="date")
expected = resampled.sum()
result = resampled.agg({"num": "sum"})
tm.assert_frame_equal(result, expected)
def test_resample_groupby_agg_listlike():
# GH 42905
ts = Timestamp("2021-02-28 00:00:00")
df = DataFrame({"class": ["beta"], "value": [69]}, index=Index([ts], name="date"))
resampled = df.groupby("class").resample("M")["value"]
result = resampled.agg(["sum", "size"])
expected = DataFrame(
[[69, 1]],
index=pd.MultiIndex.from_tuples([("beta", ts)], names=["class", "date"]),
columns=["sum", "size"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
def test_empty(keys):
# GH 26411
df = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([]))
result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
expected = DataFrame(columns=["a", "b"]).set_index(keys, drop=False)
if len(keys) == 1:
expected.index.name = keys[0]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("consolidate", [True, False])
def test_resample_groupby_agg_object_dtype_all_nan(consolidate):
# https://github.com/pandas-dev/pandas/issues/39329
dates = date_range("2020-01-01", periods=15, freq="D")
df1 = DataFrame({"key": "A", "date": dates, "col1": range(15), "col_object": "val"})
df2 = DataFrame({"key": "B", "date": dates, "col1": range(15)})
df = pd.concat([df1, df2], ignore_index=True)
if consolidate:
df = df._consolidate()
result = df.groupby(["key"]).resample("W", on="date").min()
idx = pd.MultiIndex.from_arrays(
[
["A"] * 3 + ["B"] * 3,
pd.to_datetime(["2020-01-05", "2020-01-12", "2020-01-19"] * 2),
],
names=["key", "date"],
)
expected = DataFrame(
{
"key": ["A"] * 3 + ["B"] * 3,
"date": pd.to_datetime(["2020-01-01", "2020-01-06", "2020-01-13"] * 2),
"col1": [0, 5, 12] * 2,
"col_object": ["val"] * 3 + [np.nan] * 3,
},
index=idx,
)
tm.assert_frame_equal(result, expected)
# ---- 1201-1300/1273-Delete Tree Nodes/1273-Delete Tree Nodes.py (repo: jiadaizhao/LeetCode, MIT) ----
import collections
class Solution:
def deleteTreeNodes(self, nodes: int, parent: List[int], value: List[int]) -> int:
graph = collections.defaultdict(list)
for i in range(1, len(parent)):
graph[parent[i]].append(i)
def dfs(root):
total = value[root]
count = 1
for child in graph[root]:
s, c = dfs(child)
total += s
count += c
return total, count if total != 0 else 0
return dfs(0)[1]
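if __name__ == '__main__':
    # Illustrative usage (added): LeetCode 1273 sample input, expected output 2
    # (node 3 and the subtree rooted at node 2 sum to zero and are removed).
    print(Solution().deleteTreeNodes(7, [-1, 0, 0, 1, 2, 2, 2], [1, -2, 4, 0, -2, -1, -1]))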
# ---- hanlp/components/parsers/ud/ud_model.py (repo: hankcs/HanLP, Apache-2.0) ----
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-15 14:21
from typing import Dict, Any
import torch
from hanlp.components.parsers.biaffine.biaffine_dep import BiaffineDependencyParser
from hanlp.components.parsers.biaffine.biaffine_model import BiaffineDecoder
from hanlp.components.parsers.ud.tag_decoder import TagDecoder
from hanlp.layers.embeddings.contextual_word_embedding import ContextualWordEmbeddingModule
from hanlp.layers.scalar_mix import ScalarMixWithDropout
class UniversalDependenciesModel(torch.nn.Module):
def __init__(self,
encoder: ContextualWordEmbeddingModule,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding: int = 13,
layer_dropout: int = 0.0):
super().__init__()
self.encoder = encoder
self.decoder = UniversalDependenciesDecoder(
encoder.get_output_dim(),
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding,
layer_dropout
)
def forward(self,
batch: Dict[str, torch.Tensor],
mask,
):
hidden = self.encoder(batch)
return self.decoder(hidden, batch=batch, mask=mask)
class UniversalDependenciesDecoder(torch.nn.Module):
def __init__(self,
hidden_size,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding: int = 13,
layer_dropout: int = 0.0,
) -> None:
super(UniversalDependenciesDecoder, self).__init__()
# decoders
self.decoders = torch.nn.ModuleDict({
'lemmas': TagDecoder(hidden_size, num_lemmas, label_smoothing=0.03, adaptive=True),
'upos': TagDecoder(hidden_size, num_upos, label_smoothing=0.03, adaptive=True),
'deps': BiaffineDecoder(hidden_size, n_mlp_arc, n_mlp_rel, mlp_dropout, num_rels),
'feats': TagDecoder(hidden_size, num_feats, label_smoothing=0.03, adaptive=True),
})
self.gold_keys = {
'lemmas': 'lemma_id',
'upos': 'pos_id',
'feats': 'feat_id',
}
if mix_embedding:
self.scalar_mix = torch.nn.ModuleDict({
task: ScalarMixWithDropout((1, mix_embedding),
do_layer_norm=False,
dropout=layer_dropout)
for task in self.decoders
})
else:
self.scalar_mix = None
def forward(self,
hidden,
batch: Dict[str, torch.Tensor],
mask) -> Dict[str, Any]:
mask_without_root = mask.clone()
mask_without_root[:, 0] = False
logits = {}
class_probabilities = {}
output_dict = {"logits": logits,
"class_probabilities": class_probabilities}
loss = 0
arc = batch.get('arc', None)
# Run through each of the tasks on the shared encoder and save predictions
for task in self.decoders:
if self.scalar_mix:
decoder_input = self.scalar_mix[task](hidden, mask)
else:
decoder_input = hidden
if task == "deps":
s_arc, s_rel = self.decoders[task](decoder_input, mask)
pred_output = {'class_probabilities': {'s_arc': s_arc, 's_rel': s_rel}}
if arc is not None:
# noinspection PyTypeChecker
pred_output['loss'] = BiaffineDependencyParser.compute_loss(None, s_arc, s_rel, arc,
batch['rel_id'],
mask_without_root,
torch.nn.functional.cross_entropy)
else:
pred_output = self.decoders[task](decoder_input, mask_without_root,
batch.get(self.gold_keys[task], None))
if 'logits' in pred_output:
logits[task] = pred_output["logits"]
if 'class_probabilities' in pred_output:
class_probabilities[task] = pred_output["class_probabilities"]
if 'loss' in pred_output:
# Keep track of the loss if we have the gold tags available
loss += pred_output["loss"]
if arc is not None:
output_dict["loss"] = loss
return output_dict
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        for task in self.decoders:
self.decoders[task].decode(output_dict)
return output_dict
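# Illustrative shape note (added): a forward pass returns a dict of the form
# {'logits': {...}, 'class_probabilities': {'deps': {'s_arc': ..., 's_rel': ...}, ...}}
# plus a 'loss' entry whenever gold arcs are supplied in the batch.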
# ---- ex3.py (repo: linpan/LPTHW) ----
#! /usr/bin/env python
#coding:utf-8
print "I will now count my chieckens."
print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4
print "Now I will count the eggs:"
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
print "Is it true that 3 + 2 < 5 -7 ?"
print 3 + 2 < 5 -7
print "What is 3 +2 ?", 3 + 2
print "What is 5 -7 ?", 5 - 7
print "Oh,that's why it's False."
print "How about some more."
print "Is it greater?", 5 > -2
print "is it greater or equal?", 5 >= -2
print "Is it less or equal?", 5 <= -2
# ---- zjazd_4/math_examples.py (repo: ArturoWest/pythonbootcamp) ----
import math
print(math.sin(math.pi/2))
print(dir(math))
"""
Create a sphere class:
s = Sfera(10)
s.promien # 10
s.objetosc() # 4188.78...
s.pole_powierzchni() # 1256.63...
"""
class Kula:
def __init__(self, r):
self.promien = r
def objetosc(self):
return (4/3) * math.pi * math.pow(self.promien, 3)
def pole_powierzchni(self):
return 4 * math.pi * self.promien ** 2
s = Kula(10)
print(s.objetosc())
print(s.pole_powierzchni())
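# Expected output (added note): roughly 4188.79 (volume) and 1256.64 (surface area).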
# ---- solutions_5636311922769920_0/Python/ricbit/fractiles.py (repo: alexandraback/datacollection) ----
for case in xrange(input()):
k,c,s = map(int, raw_input().split())
print "Case #%d: %s" % (case + 1, ' '.join(str(1+i) for i in xrange(k)))
# ---- backend/dataset/strStr/strmatch_16.py (repo: mehulthakral/logic_detector) ----
class Solution:
def strStr(self, haystack, needle):
n, h = len(needle), len(haystack)
hash_n = hash(needle)
for i in range(h-n+1):
if hash(haystack[i:i+n]) == hash_n:
return i
return -1
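if __name__ == '__main__':
    # Illustrative usage and caveat (added): "ll" first occurs in "hello" at
    # index 2. Equal hashes do not strictly guarantee equal strings, so a
    # production version would confirm haystack[i:i+n] == needle before returning.
    print(Solution().strStr("hello", "ll"))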
# ---- contactus/models.py (repo: OneStage-NITW/website) ----
from django.db import models
# Create your models here.
class Supporter(models.Model):
name=models.CharField(max_length=100)
# ---- tazebao/newsletter/migrations/0019_tracking_notes.py (repo: otto-torino/tazebao) ----
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-27 12:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0018_tracking_type'),
]
operations = [
migrations.AddField(
model_name='tracking',
name='notes',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='note'),
),
]
# ---- 2144.py (repo: mcavalca/uri-python) ----
final = 0.0
total = 0
while True:
w1, w2, r = [int(x) for x in input().split()]
if w1 == w2 == r == 0:
break
media = float(((w1 * (1 + r/30))+(w2 * (1 + r/30))))/2.0
final += media
total += 1
if media < 13:
print('Nao vai da nao')
elif media < 14:
print('E 13')
elif media < 40:
print('Bora, hora do show! BIIR!')
elif media < 60:
print('Ta saindo da jaula o monstro!')
else:
print('AQUI E BODYBUILDER!!')
final = final/float(total)
if final > 40:
print()
print('Aqui nois constroi fibra rapaz! Nao e agua com musculo!')
# ---- fastccd_support_ioc/utils/python2-version/setFCRIC-Normal.py (repo: ihumphrey/fastccd_support_ioc, BSD-3-Clause) ----
#! /usr/bin/python
# -*- coding: utf-8 -*-
import cin_constants
import cin_register_map
import cin_functions
# Mask Triggers & turn off Bias
# import setTriggerSW
# cin_functions.setCameraOff()
# Clamp Mode registers
# Write clampr
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0048", 0)
cin_functions.WriteReg("821F", "00C7", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0049", 0)
cin_functions.WriteReg("821F", "004C", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Write clamp
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0050", 0)
cin_functions.WriteReg("821F", "00B4", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0051", 0)
cin_functions.WriteReg("821F", "0002", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Write ac on
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0058", 0)
cin_functions.WriteReg("821F", "0001", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "0059", 0)
cin_functions.WriteReg("821F", "004C", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "005A", 0)
cin_functions.WriteReg("821F", "0064", 0)
cin_functions.WriteReg("8001", "0105", 0)
cin_functions.WriteReg("821D", "A000", 0)
cin_functions.WriteReg("821E", "005B", 0)
cin_functions.WriteReg("821F", "005B", 0)
cin_functions.WriteReg("8001", "0105", 0)
# Bias On & allow Ext Triggers
# cin_functions.setCameraOn()
# import setTrigger0
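# Pattern note (added; an inference from the sequences above): each FCRIC
# setting appears to be programmed as three writes (0x821D/0x821E select the
# address, 0x821F carries the value) followed by writing 0x0105 to 0x8001 to
# commit it.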
# ---- website/migrations/0011_auto_20150726_2337.py (repo: katur/forthebirds) ----
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0010_auto_20150104_1404'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='speaking_keynotes',
field=models.TextField(default='', help_text=b'Use Markdown syntax for italics, bullets, etc. See <a href="http://www.darkcoding.net/software/markdown-quick-reference">a quick reference</a>, <a href="http://www.markdowntutorial.com/">a tutorial</a>, or practice <a href="http://dillinger.io/">here</a>. ', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='speaking_testimonials',
field=models.TextField(default='', help_text=b'Use Markdown syntax for italics, bullets, etc. See <a href="http://www.darkcoding.net/software/markdown-quick-reference">a quick reference</a>, <a href="http://www.markdowntutorial.com/">a tutorial</a>, or practice <a href="http://dillinger.io/">here</a>. ', blank=True),
preserve_default=False,
),
]
# ---- NewModelNetworkKBP/dataElmoKBP.py (repo: llq20133100095/ANA_SL_ELMO) ----
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 10:47:09 2019
@author: llq
@function:
1.process the KBP dataset
2.concatenate the "glove embedding"
and "Elmo embedding" and "Pos embedding"
"""
import numpy as np
#from allennlp.commands.elmo import ElmoEmbedder
import re
import time
class ELMO_KBP:
def __init__(self):
"""
0.Word2vec
"""
#word2vec file
self.output_vector_filename=r"../processData/KBP-SF48-master/glove_6B_300vec_kbp.txt"
        #Dictionary: store the word and vector
self.dict_word_vec={}
#Vector Size
self.vector_size=300
"""
1.(1)Initial max sentence length.
(2)Store the label id.
"""
self.label2id_txt="../processData/KBP-SF48-master/label2id.txt"
self.max_length_sen=82
#label id value: Change the label to id.And 10 classes number(0-9)
self.label2id={}
"""
        2.training filename
"""
#read data
self.train_filename=r"../data/KBP-SF48-master/train_sf3.txt"
#store data
self.train_sen_store_filename=r"../processData/KBP-SF48-master/train_sen.txt"
self.train_label_store_filename=r"../processData/KBP-SF48-master/train_label.txt"
        #Position file
self.training_e1_e2_pos_filename=r"../processData/KBP-SF48-master/training_e1_e2.txt"
"""
3.testing filename
"""
#read data
self.test_filename=r"../data/KBP-SF48-master/test_sf3.txt"
#store data
self.test_sen_store_filename=r"../processData/KBP-SF48-master/test_sen.txt"
self.test_label_store_filename=r"../processData/KBP-SF48-master/test_label.txt"
        #Position file
self.testing_e1_e2_pos_filename=r"../processData/KBP-SF48-master/testing_e1_e2.txt"
"""
4.Position:initial the position vector
"""
self.pos2vec_len=20
self.pos2vec_init=np.random.normal(size=(131,20),loc=0,scale=0.05)
"""
5.Process training data
"""
#training sentence
self.training_sen_number=28888
"""
6.Process testing data
"""
#Testing sentence
self.testing_sen_number=9600 #(9574)
"""
8.SDP file
"""
self.e1_sdp_train_file="../SdpNetwork/sdpData/train_kbp/train_e1_SDP.txt"
self.e2_sdp_train_file="../SdpNetwork/sdpData/train_kbp/train_e2_SDP.txt"
self.e1_sdp_test_file="../SdpNetwork/sdpData/test_kbp/test_e1_SDP.txt"
self.e2_sdp_test_file="../SdpNetwork/sdpData/test_kbp/test_e2_SDP.txt"
"""
9.entity pair embedding
"""
#entity train file
self.entity_train_file="../processData/KBP-SF48-master/training_e1_e2.txt"
#entity test file
self.entity_test_file="../processData/KBP-SF48-master/testing_e1_e2.txt"
"""
10.Elmo save
"""
self.train_elmo_file='./data/train_kbp_elmo_embedding.npy'
self.test_elmo_file='./data/test_kbp_elmo_embedding.npy'
"""
11.Merge Embedding
"""
self.merge_path = './data/merge_embedding'
self.train_split_n = 4
self.test_split_n = 2
self.train_merge_file = 'train_merge_embedding_'
self.test_merge_file = 'test_merge_embedding_'
def dict_word2vec(self):
"""
When create Process_data,must exec this function.
Initial dict_word_vec.
"""
#put the vector in the dictionary
with open(self.output_vector_filename,"r") as f:
i=0
for lines in f.readlines():
if(i==0):
i=i+1
continue
lines_split=lines.split(" ")
keyword=lines_split[0]
lines_split=map(float,lines_split[1:-1])
self.dict_word_vec[keyword]=lines_split
#Set value in "BLANK",its size is 300
self.dict_word_vec["BLANK"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
#Set value in "<e1>","</e1>","<e2>","</e2>"
self.dict_word_vec["<e1>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["</e1>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["<e2>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
self.dict_word_vec["</e2>"]=np.random.normal(size=self.vector_size,loc=0,scale=0.05)
def label2id_init(self):
"""
When create Process_data,must exec this function.
Change the traing label value to id.
"""
with open(self.label2id_txt,"r") as f:
for lines in f.readlines():
lines=lines.strip("\r\n").split()
self.label2id[lines[0]]=lines[1]
#embedding the position
def pos_embed(self,x):
if x < -64:
return 0
if x >= -64 and x <= 64:
return x+65
if x > 64:
return 130
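        # Illustrative mapping (added): pos_embed(-70) -> 0, pos_embed(0) -> 65,
        # pos_embed(3) -> 68 and pos_embed(99) -> 130, i.e. 131 buckets that
        # index the (131, 20) pos2vec_init table above.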
def embedding_lookup(self,sen_store_filename,e1_e2_pos_filename,sen_number):
"""
        1.sen_list2D: put sentences in this format: [[sentence1],[sentence2]]
        2.word_vec3D: get each word vector, making data of shape N*82*300.
        In 82*300, the first dim is the word; the second dim is its vector.
        3.word_pos_vec3D: has "word vector" and "position vector";
        its shape is N*82*340 (N is 28888 for training, 9600 for testing).
"""
word_vec3D=np.empty((sen_number,self.max_length_sen,self.vector_size))
# word_pos_vec3D=np.empty((sen_number,self.max_length_sen,340))
#sen_list:store the sentence([[sentence1],[sentence2]] )
sen_list2D=[]
#sen_length:length of sentence
sen_length=[]
#load the word in sen_list2D.
#The format is:[[sentence1],[sentence2]]
with open(sen_store_filename,"r") as f:
sentence_id=0
for lines in f.readlines():
                # collapse any run of spaces to a single space (the old replace()
                # chain only handled runs of up to four spaces)
                lines = re.sub(' +', ' ', lines).split(" ")[:-1]
                #Remove the leading " "
                if(lines[0]==""):
                    lines=lines[1:]
#store the original length of sentence
sen_length.append(len(lines))
sentence_id=sentence_id+1
#append the length of sen_list2D to 105 lengths.
#And the flag is 'BLANK'
if(len(lines)<=self.max_length_sen):
for i in range(self.max_length_sen-len(lines)):
lines.append('BLANK')
sen_list2D.append(lines)
#Find the word vector in dict_word_vec.
#Make data to this format:N*105*300,(N has two value "8000" and "2717")
        #In 105*300, the first dim is "word"; the second dim is "vector"
sentence_id=0
for sentences in sen_list2D:
word_id=0
for words in sentences:
#find word in dict_word_vec
                if(words in self.dict_word_vec):  # "in" replaces the Python-2-only has_key()
                    word_vec3D[sentence_id][word_id]=self.dict_word_vec[words]
                    word_id=word_id+1
                else:
                    # unknown word: cache a small random vector so later lookups agree
                    self.dict_word_vec[words]=np.random.normal(size=(1,self.vector_size),loc=0,scale=0.05)
                    word_vec3D[sentence_id][word_id]=self.dict_word_vec[words]
                    word_id=word_id+1
sentence_id=sentence_id+1
#Get the "realtion word"-"other word" in this.
#pos_id format:N*105*2,(N has two value "8000" and "2717")
#And 105(word)*2(id):
# [pos_id1,pos_id2],
# [pos_id1,pos_id2],
# [pos_id1,pos_id2],
# [pos_id1,pos_id2],
pos_id=np.empty((sen_number,self.max_length_sen,2))
sentence_id=0
with open(e1_e2_pos_filename,"r") as f:
for lines in f.readlines():
#the two "relation word":e1,e2
e1=lines.split("<e>")[0].split(" ")[1:]
e2=lines.split("<e>")[1].strip("\n").split(" ")
#Position number of e1 and e2
pos_e1=0
pos_e2=0
                #If an entity spans several words, pos_e1/pos_e2 point at the word just before its closing tag
for i in range(len(sen_list2D[sentence_id])):
if(sen_list2D[sentence_id][i]==e1[-2] and sen_list2D[sentence_id][i+1]=="</e1>"):
pos_e1=i
if(sen_list2D[sentence_id][i]==e2[-1] and sen_list2D[sentence_id][i+1]=="</e2>"):
pos_e2=i
for i in range(len(sen_list2D[sentence_id])):
if(i==pos_e1):
pos_id[sentence_id][i]=\
np.array([self.pos_embed(0),self.pos_embed(i-pos_e2)])
elif(i==pos_e2):
pos_id[sentence_id][i]=\
np.array([self.pos_embed(i-pos_e1),self.pos_embed(0)])
else:
pos_id[sentence_id][i]=\
np.array([self.pos_embed(i-pos_e1),self.pos_embed(i-pos_e2)])
sentence_id=sentence_id+1
#Set the "position word" to vector.
        #pos_vec:N(sentence)*105(word)*40(two 20-d position vectors); N takes two values "8000" and "2717"
pos_vec=np.empty((sen_number,self.max_length_sen,40))
sentence_id=0
for word in pos_id:
i=0
for pos_num in word:
pos_vec[sentence_id][i]=np.hstack\
((self.pos2vec_init[int(pos_num[0])],self.pos2vec_init[int(pos_num[1])]))
i=i+1
sentence_id=sentence_id+1
return word_vec3D, pos_vec, sen_length, sen_list2D
def sentence_list(self,sen_store_filename):
"""
1.sen_list2D:put sentence in this.format:[[sentence1],[sentence2]]
"""
#sen_list:store the sentence([[sentence1],[sentence2]] )
sen_list2D=[]
#sen_length:length of sentence
sen_length=[]
pattern = u' +'
#load the word in sen_list2D.
#The format is:[[sentence1],[sentence2]]
with open(sen_store_filename,"r") as f:
sentence_id=0
for lines in f.readlines():
lines = re.sub(pattern, ' ', lines)
lines=lines.split(" ")[:-1]
                #Remove the leading " "
if(lines[0]==""):
lines=lines[1:]
#store the original length of sentence
sen_length.append(len(lines))
sentence_id=sentence_id+1
#append the length of sen_list2D to 105 lengths.
#And the flag is 'BLANK'
if(len(lines)<=self.max_length_sen):
for i in range(self.max_length_sen-len(lines)):
lines.append('BLANK')
sen_list2D.append(lines)
return sen_list2D
"""
#use the python3
def embedding_lookup_in_elmo(self,sen_list2D):
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
fin_embedding=np.zeros((len(sen_list2D),self.max_length_sen,1024))
elmo = ElmoEmbedder(options_file, weight_file)
for i in range(len(sen_list2D)):
print('iter: %d'%(i))
elmo_embedding, elmo_mask = elmo.batch_to_embeddings(sen_list2D[i:i+1])
#select the last layer as embedding
elmo_embedding=np.array(elmo_embedding[0][2])
fin_embedding[i]=elmo_embedding
return fin_embedding
"""
def merge_glove_elmo(self, word_pos_vec3D, pos_vec, elmo_file):
"""
Function:
1.merge the word_pos_vec3D and elmo_embedding
2.word_pos_vec3D: [glove embedding, position embedding]
Parameter:
1.word_pos_vec3D: embedding
2.elmo_file: save the ELMO embedding
"""
elmo_embedding=np.load(elmo_file)
word_vec3D = np.concatenate((word_pos_vec3D, elmo_embedding, pos_vec), axis=2)
return word_vec3D
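        # With the defaults used in this file, the per-token width after the
        # merge is 300 (GloVe) + 1024 (ELMo) + 40 (two 20-d position vectors) = 1364.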
def embedding_looking_root_e1_e2(self,e1_sdp_file,e2_sdp_file,sen_number,sen_list2D,elmo_file):
"""
Function:
embedding the "root" and e1 and e2
"""
#store the root word
root_list=[]
#store the e1 word
e1_list=[]
with open(e1_sdp_file,"r") as f:
for lines in f.readlines():
root=lines.split(" ")[0].replace("'","")
#get the format such as "book-crossing"
if "-" in root:
root=root.split("-")[1]
# #get the format such as "nt"
# if root=="nt":
# root="t"
# if root=="and/or":
# root="and"
# if root=="ta":
# root="gotta"
# if root=="%":
# root="95%"
e1=lines.strip("\r\n").split(" ")[-2]
root_list.append(root)
e1_list.append(e1)
#store the e2 word
e2_list=[]
with open(e2_sdp_file,"r") as f:
for lines in f.readlines():
e2=lines.strip("\r\n").split(" ")[-2]
e2_list.append(e2)
#load the elmo_embedding
elmo_embedding=np.load(elmo_file)
#root embedding and elmo_embedding
root_embedding=np.zeros((sen_number,self.vector_size+1024))
sen_num=0
for root in root_list:
try:
index=sen_list2D[sen_num].index(root)
elmo=elmo_embedding[sen_num][index]
            except ValueError:  # root is not among the sentence tokens
elmo=np.random.normal(size=(1024,),loc=0,scale=0.05)
try:
root_embedding[sen_num]=np.concatenate((self.dict_word_vec[root],elmo),axis=0)
            except KeyError:  # root has no pretrained vector yet
self.dict_word_vec[root]=np.random.normal(size=(self.vector_size,),loc=0,scale=0.05)
root_embedding[sen_num]=np.concatenate((self.dict_word_vec[root],elmo),axis=0)
sen_num+=1
#e1 embedding
e1_embedding=np.zeros((sen_number,self.vector_size+1024))
sen_num=0
for e1 in e1_list:
try:
index=sen_list2D[sen_num].index(e1)
elmo=elmo_embedding[sen_num][index]
            except ValueError:  # e1 is not among the sentence tokens
elmo=np.random.normal(size=(1024,),loc=0,scale=0.05)
try:
e1_embedding[sen_num]=np.concatenate((self.dict_word_vec[e1],elmo),axis=0)
            except KeyError:  # e1 has no pretrained vector yet
self.dict_word_vec[e1]=np.random.normal(size=(self.vector_size,),loc=0,scale=0.05)
e1_embedding[sen_num]=np.concatenate((self.dict_word_vec[e1],elmo),axis=0)
sen_num+=1
#e2 embedding
e2_embedding=np.zeros((sen_number,self.vector_size+1024))
sen_num=0
for e2 in e2_list:
try:
index=sen_list2D[sen_num].index(e2)
elmo=elmo_embedding[sen_num][index]
            except ValueError:  # e2 is not among the sentence tokens
elmo=np.random.normal(size=(1024,),loc=0,scale=0.05)
try:
e2_embedding[sen_num]=np.concatenate((self.dict_word_vec[e2],elmo),axis=0)
            except KeyError:  # e2 has no pretrained vector yet
self.dict_word_vec[e2]=np.random.normal(size=(self.vector_size,),loc=0,scale=0.05)
e2_embedding[sen_num]=np.concatenate((self.dict_word_vec[e2],elmo),axis=0)
sen_num+=1
#set position embedding in root,e1 and e2
root_pos_emb=np.zeros((sen_number,self.pos2vec_len*2))
e1_pos_emb=np.zeros((sen_number,self.pos2vec_len*2))
e2_pos_emb=np.zeros((sen_number,self.pos2vec_len*2))
for sentence_id in range(len(sen_list2D)):
#Position number of root, e1 and e2
pos_root=0
pos_e1=0
pos_e2=0
            #If an entity spans several words, pos_e1/pos_e2 point at the word just before its closing tag
for i in range(len(sen_list2D[sentence_id])):
if(sen_list2D[sentence_id][i]==root_list[sentence_id]):
pos_root=i
if(sen_list2D[sentence_id][i]==e1_list[sentence_id] and sen_list2D[sentence_id][i+1]=="</e1>"):
pos_e1=i
if(sen_list2D[sentence_id][i]==e2_list[sentence_id] and sen_list2D[sentence_id][i+1]=="</e2>"):
pos_e2=i
root_pos_emb[sentence_id]=np.hstack\
((self.pos2vec_init[int(self.pos_embed(pos_root-pos_e1))],self.pos2vec_init[int(self.pos_embed(pos_root-pos_e2))]))
e1_pos_emb[sentence_id]=np.hstack\
((self.pos2vec_init[int(self.pos_embed(0))],self.pos2vec_init[int(self.pos_embed(pos_e1-pos_e2))]))
e2_pos_emb[sentence_id]=np.hstack\
((self.pos2vec_init[int(self.pos_embed(pos_e2-pos_e1))],self.pos2vec_init[int(self.pos_embed(0))]))
#concate word embedding and pos embedding
root_embedding=np.concatenate((root_embedding,root_pos_emb),axis=1)
e1_embedding=np.concatenate((e1_embedding,e1_pos_emb),axis=1)
e2_embedding=np.concatenate((e2_embedding,e2_pos_emb),axis=1)
return np.float32(root_embedding),np.float32(e1_embedding),np.float32(e2_embedding)
def iterate_minibatches_inputAttRootE1E2(self, inputs, targets, sen_length, batchsize, input_root, input_e1, input_e2, shuffle=False):
"""
Get minibatches in input attention
"""
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt], sen_length[excerpt], input_root[excerpt], input_e1[excerpt], input_e2[excerpt]
def iterate_minibatches(self, inputs, targets, sen_length, batchsize, shuffle=False):
"""
Get minibatches
"""
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt], sen_length[excerpt]
def mask_train_input(self,y_train,num_labels='all'):
"""
        Mask the training labels: where mask==1 the label is used (supervised),
        where mask==0 the label is ignored (unsupervised).
"""
# Construct mask_train. It has a zero where label is unknown, and one where label is known.
if num_labels == 'all':
# All labels are used.
mask_train = np.ones(len(y_train), dtype=np.float32)
print("Keeping all labels.")
else:
#Rough classification
rou_num_classes=10
# Assign labels to a subset of inputs.
max_count = num_labels // rou_num_classes
print("Keeping %d labels per rough class." % max_count)
mask_train = np.zeros(len(y_train), dtype=np.float32)
count = [0] * rou_num_classes
for i in range(len(y_train)):
label = y_train[i]
                rou_label=int(label)//2  # floor division: "/" would give a float index in Python 3
if (count[rou_label]) < max_count:
mask_train[i] = 1.0
count[rou_label] += 1
return mask_train
def label2id_in_data(self,label_store_filename,sen_number):
"""
        Map each label string in the train or test data to its integer id.
"""
data_label=np.empty((sen_number)).astype(int)
label_number=0
with open(label_store_filename,"r") as f:
for lines in f.readlines():
data_label[label_number]=self.label2id[lines.strip("\r\n")]
label_number=label_number+1
return data_label
def label2id_1hot(self,data_label,label2id):
"""
        One-hot encode the labels: [0,0,...,0,1,0,...,0]
"""
onehot_encoded=[]
for value in data_label:
onehot=np.zeros((len(label2id)))
onehot[value]=1
onehot_encoded.append(onehot)
return np.array(onehot_encoded)
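    # Example (illustrative): with 3 classes, a label id of 1 becomes [0., 1., 0.].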
if __name__ == "__main__":
"""
1.init the ELMO_KBP
"""
elmo_kbp = ELMO_KBP()
start_time = time.time()
"""
2.load the dict word2vec
"""
elmo_kbp.dict_word2vec()
elmo_kbp.label2id_init()
print("load the dict word2vec: %f s" % (time.time() - start_time))
# """
# 3.load the ELMO embedding
# """
# #train elmo data
# train_sen_list2D = elmo_kbp.sentence_list(elmo_kbp.train_sen_store_filename)
# train_elmo_embedding = elmo_kbp.embedding_lookup_in_elmo(train_sen_list2D)
# np.save(elmo_kbp.train_elmo_file, train_elmo_embedding)
#
# #test elmo data
# test_sen_list2D = elmo_kbp.sentence_list(elmo_kbp.test_sen_store_filename)
# test_elmo_embedding = elmo_kbp.embedding_lookup_in_elmo(test_sen_list2D)
# np.save(elmo_kbp.test_elmo_file, test_elmo_embedding)
"""
4.load the glove embedding
"""
    #training_word_pos_vec3D: training data
training_word_pos_vec3D, train_pos_vec, training_sen_length,train_sen_list2D=\
elmo_kbp.embedding_lookup(elmo_kbp.train_sen_store_filename,\
elmo_kbp.training_e1_e2_pos_filename,elmo_kbp.training_sen_number)
training_word_pos_vec3D=np.float32(training_word_pos_vec3D)
training_sen_length=np.int32(np.array(training_sen_length))
print("load the train glove embedding: %f s" % (time.time() - start_time))
#testing_word_pos_vec3D:testing data
testing_word_pos_vec3D, test_pos_vec, testing_sen_length,test_sen_list2D=\
elmo_kbp.embedding_lookup(elmo_kbp.test_sen_store_filename,\
elmo_kbp.testing_e1_e2_pos_filename,elmo_kbp.testing_sen_number)
testing_word_pos_vec3D=np.float32(testing_word_pos_vec3D)
testing_sen_length=np.int32(np.array(testing_sen_length))
print("load the test glove embedding: %f s" % (time.time() - start_time))
"""
5.merge the all embedding
"""
training_word_pos_vec3D = elmo_kbp.merge_glove_elmo(training_word_pos_vec3D, train_pos_vec, elmo_kbp.train_elmo_file)
del train_pos_vec
testing_word_pos_vec3D = elmo_kbp.merge_glove_elmo(testing_word_pos_vec3D, test_pos_vec, elmo_kbp.test_elmo_file)
del test_pos_vec
print("merge the all embedding: %f s" % (time.time() - start_time))
"""
6.load the label
"""
#4.training label
training_label=elmo_kbp.label2id_in_data(elmo_kbp.train_label_store_filename,\
elmo_kbp.training_sen_number)
training_label=np.int32(training_label)
#5.testing label
testing_label=elmo_kbp.label2id_in_data(elmo_kbp.test_label_store_filename,\
elmo_kbp.testing_sen_number)
testing_label=np.int32(testing_label)
"""
7.load the embedding of root, e1 and e2.
"""
train_root_embedding, train_e1_embedding, train_e2_embedding = \
elmo_kbp.embedding_looking_root_e1_e2(elmo_kbp.e1_sdp_train_file,\
elmo_kbp.e2_sdp_train_file, elmo_kbp.training_sen_number, train_sen_list2D, elmo_kbp.train_elmo_file)
test_root_embedding, test_e1_embedding, test_e2_embedding=\
elmo_kbp.embedding_looking_root_e1_e2(elmo_kbp.e1_sdp_test_file,\
elmo_kbp.e2_sdp_test_file, elmo_kbp.testing_sen_number, test_sen_list2D, elmo_kbp.test_elmo_file)
"""
8.label id value and one-hot
"""
label2id = elmo_kbp.label2id
training_label_1hot = elmo_kbp.label2id_1hot(training_label, label2id)
training_label_1hot = np.int32(training_label_1hot)
testing_label_1hot = elmo_kbp.label2id_1hot(testing_label, label2id)
testing_label_1hot = np.int32(testing_label_1hot)
del training_label
del testing_label

# ----------------------------------------------------------------------
# File: baekjoon_1149.py  (repo: rheehot/ProblemSolving_Python, no license)
# ----------------------------------------------------------------------
'''
Problem Solving Baekjoon 1149
Author: Injun Son
Date: September 23, 2020
'''
import sys
import copy
from itertools import combinations
from collections import deque
import math
N = int(input())
cost = []
for _ in range(N):
r, g, v = map(int, input().split())
cost.append([r, g, v])
'''
dp[i][0] = minimum cost when house i is painted R = cost[i][0] + min(dp[i-1][1], dp[i-1][2])
dp[i][1] = minimum cost when house i is painted G = cost[i][1] + min(dp[i-1][0], dp[i-1][2])
dp[i][2] = minimum cost when house i is painted B = cost[i][2] + min(dp[i-1][0], dp[i-1][1])
'''
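# Worked example (the classic sample for this problem):
# costs [[26,40,83],[49,60,57],[13,89,99]] build dp rows
# [26,40,83] -> [89,86,83] -> [96,172,185], so the answer is min(96,172,185) = 96.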
dp = [ [0,0,0] for _ in range(N+1)]
dp[0][0] = cost[0][0]
dp[0][1] = cost[0][1]
dp[0][2] = cost[0][2]
for i in range(1, N):
dp[i][0] = min(dp[i-1][1], dp[i-1][2])+cost[i][0]
dp[i][1] = min(dp[i - 1][0], dp[i - 1][2]) + cost[i][1]
dp[i][2] = min(dp[i - 1][0], dp[i - 1][1]) + cost[i][2]
print(min(dp[N-1]))

# ----------------------------------------------------------------------
# File: scripts/generate_pairs2_cacd.py  (repo: phymhan/face-aging,
# licenses: BSD-3-Clause, BSD-2-Clause)
# ----------------------------------------------------------------------
# datafile: A B 0/1/2
# label: 0: A < B, 1: A == B, 2: A > B
import os
import random
import argparse
random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--N', type=int, default=140000)
parser.add_argument('--margin', type=int, default=10)
opt = parser.parse_args()
def parse_age_label(fname, binranges):
strlist = fname.split('_')
age = int(strlist[0])
l = None
for l in range(len(binranges)-1):
if (age >= binranges[l]) and (age < binranges[l+1]):
break
return l
def parse_age(fname):
strlist = fname.split('_')
age = int(strlist[0])
return age
# root = '/media/ligong/Toshiba/Datasets/CACD/CACD_cropped2_400'
mode = opt.mode
src = '../sourcefiles/CACD_'+mode+'_10k.txt'
N = opt.N
with open(src, 'r') as f:
fnames = f.readlines()
fnames = [fname.rstrip('\n') for fname in fnames]
def label_fn(a1, a2, m):
if abs(a1-a2) <= m:
return 1
elif a1 < a2:
return 0
else:
return 2
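# Quick check of the labeling rule (illustrative): with margin m=10,
# label_fn(30, 45, 10) -> 0 (A younger), label_fn(50, 45, 10) -> 1 (within margin),
# label_fn(60, 45, 10) -> 2 (A older).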
cnt = [0, 0, 0]
random.shuffle(fnames)
with open(mode+'_pairs_m%d_cacd_10k2.txt'%opt.margin, 'w') as f:
for _ in range(N):
# idx = _ % N
# name1 = fnames[idx]
# name2 = random.choice(fnames)
# if random.random() < 0.5:
# tmp = name1
# name1 = name2
# name2 = tmp
ss = random.sample(fnames, 2)
name1 = ss[0].rstrip('\n')
name2 = ss[1].rstrip('\n')
label = label_fn(parse_age(name1), parse_age(name2), opt.margin)
cnt[label] += 1
f.write('%s %s %d\n' % (name1, name2, label))
w = []
for c in cnt:
w.append(1.0 * sum(cnt) / c)
print([x/sum(w) for x in w])

# ----------------------------------------------------------------------
# File: Python/venv/Lib/site-packages/tensorflow/keras/datasets/boston_housing/__init__.py
# (repo: khaled147/Koneked, no license)
# ----------------------------------------------------------------------
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Boston housing price regression dataset.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.datasets.boston_housing import load_data
del _print_function
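# Typical use of the re-exported loader (assuming TensorFlow is installed):
#   (x_train, y_train), (x_test, y_test) = load_data()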

# ----------------------------------------------------------------------
# File: part010/ch05_shapely/sec6_interop/test_3_geo_inter_x_x.py
# (repo: GAIMJKP/book_python_gis, no license)
# ----------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
from shapely.geometry import asShape
d = {"type": "Point", "coordinates": (0.0, 0.0)}
shape = asShape(d)
shape.geom_type
tuple(shape.coords)
list(shape.coords)
###############################################################################
class GeoThing(object):
def __init__(self, d):
self.__geo_interface__ = d
###############################################################################
thing = GeoThing(d)
shape = asShape(thing)
shape.geom_type
tuple(shape.coords)
list(shape.coords)
###############################################################################
from shapely.geometry import mapping
thing = GeoThing(d)
m = mapping(thing)
type(m)
m['type']
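# Expected values (illustrative): mapping(thing) returns a GeoJSON-like dict
# such as {'type': 'Point', 'coordinates': (0.0, 0.0)}, so m['type'] == 'Point'.
# Note: asShape() was removed in Shapely 2.0; shapely.geometry.shape() replaces it.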

# ----------------------------------------------------------------------
# File: flask_app.py  (repo: Yaomingqing/Image-Super-Resolution, no license)
# ----------------------------------------------------------------------
from keras.models import load_model
from flask import Flask, request, render_template, flash, redirect, url_for
from werkzeug.utils import secure_filename
import models
import os
import tensorflow as tf
upload_folder = 'data/'
if not os.path.exists(upload_folder):
os.makedirs(upload_folder)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'bmp'}
model = load_model('keras_models/RNSR_model.h5')
with tf.device('/cpu:0'):
m = models.ResNetSR(2)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = upload_folder
app.secret_key = 'WTF_I_already*Installed^%Open%&$CV'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
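# e.g. allowed_file('photo.JPG') -> True, allowed_file('notes.txt') -> False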
@app.route('/', methods=['GET', 'POST'])
def root():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser may still
        # submit an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file',
filename=filename))
return render_template('index.html', title='Image Super Resolution')
@app.route('/uploaded_file/<string:filename>')
def uploaded_file(filename):
path = upload_folder + filename
try:
m.upscale(path, save_intermediate=False, mode="fast")
ext = filename.rsplit('.', 1)[1]
path = upload_folder + filename.rsplit('.', 1)[0] + "_scaled(2x)." + ext
return redirect(url_for('image', filename=path))
    except Exception:  # narrower than a bare except; upscaling failures land here
flash("Image is too large !")
return redirect('/')
# <path:> is needed because the passed filename contains "/" (it includes the
# upload folder), and the redirect above issues a GET request
@app.route('/image/<path:filename>')
def image(filename):
return render_template('disp.html', image=filename)
if __name__ == "__main__":
    app.run(port=8888)

# ----------------------------------------------------------------------
# File: 232/gold.py  (repo: pogross/bitesofpy, no license)
# ----------------------------------------------------------------------
from itertools import tee
from dataclasses import dataclass
from typing import Tuple
# https://pkgstore.datahub.io/core/gold-prices/annual_csv/data/343f626dd4f7bae813cfaac23fccd1bc/annual_csv.csv
gold_prices = """
1950-12,34.720 1951-12,34.660 1952-12,34.790 1953-12,34.850 1954-12,35.040
1955-12,34.970 1956-12,34.900 1957-12,34.990 1958-12,35.090 1959-12,35.050
1960-12,35.540 1961-12,35.150 1962-12,35.080 1963-12,35.080 1964-12,35.120
1965-12,35.130 1966-12,35.180 1967-12,35.190 1968-12,41.113 1969-12,35.189
1970-12,37.434 1971-12,43.455 1972-12,63.779 1973-12,106.236 1974-12,183.683
1975-12,139.279 1976-12,133.674 1977-12,160.480 1978-12,207.895 1979-12,463.666
1980-12,596.712 1981-12,410.119 1982-12,444.776 1983-12,388.060 1984-12,319.622
1985-12,321.985 1986-12,391.595 1987-12,487.079 1988-12,419.248 1989-12,409.655
1990-12,378.161 1991-12,361.875 1992-12,334.657 1993-12,383.243 1994-12,379.480
1995-12,387.445 1996-12,369.338 1997-12,288.776 1998-12,291.357 1999-12,283.743
2000-12,271.892 2001-12,275.992 2002-12,333.300 2003-12,407.674 2004-12,442.974
2005-12,509.423 2006-12,629.513 2007-12,803.618 2008-12,819.940 2009-12,1135.012
2010-12,1393.512 2011-12,1652.725 2012-12,1687.342 2013-12,1221.588 2014-12,1200.440
2015-12,1068.317 2016-12,1152.165 2017-12,1265.674 2018-12,1249.887
""" # noqa E501
@dataclass
class GoldPrice:
year: int
price: float
change: float = 0
def pairwise(iterable):
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def years_gold_value_decreased(gold_prices: str = gold_prices) -> Tuple[int, int]:
"""Analyze gold_prices returning a tuple of the year the gold price
decreased the most and the year the gold price increased the most.
"""
prices = [
GoldPrice(year=int(entry.split(",")[0][:4]), price=float(entry.split(",")[1]))
for entry in " ".join(gold_prices.splitlines()).strip().split(" ")
]
for first, second in pairwise(prices):
second.change = first.price - second.price
prices.sort(key=lambda x: x.change)
return prices[-1].year, prices[0].year
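# Sanity check computed from the table above (illustrative): the sharpest drop is
# 2012 -> 2013 and the sharpest rise 2008 -> 2009, so this should return (2013, 2009).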

# ----------------------------------------------------------------------
# File: Test/2020年6月20日/selenium爬取动态加载数据.py
# ("crawl dynamically loaded data with selenium";
# repo: GithubLucasSong/PythonProject, no license)
# ----------------------------------------------------------------------
from selenium import webdriver
from lxml import etree
from time import sleep
bro = webdriver.Chrome(executable_path='chromedriver')
# bro = webdriver.Edge(executable_path='./msedgedriver')
bro.get('http://125.35.6.84:81/xk/')
sleep(1)
# ่ทๅ้กต้ขๆบ็ ๅ
ๅฎน
page_text = bro.page_source
all_page_text = [page_text]
for i in range(5):
next_page_btn = bro.find_element_by_xpath('//*[@id="pageIto_next"]')
next_page_btn.click()
sleep(1)
all_page_text.append(bro.page_source)
for page_text in all_page_text:
tree = etree.HTML(page_text)
li_list = tree.xpath('//*[@id="gzlist"]/li')
for li in li_list:
title = li.xpath('./dl/@title')[0]
print(title)
bro.quit()
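# Note: fixed sleep(1) calls are a crude wait; selenium's WebDriverWait with
# expected_conditions is the more robust way to wait for the next page to load.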

# ----------------------------------------------------------------------
# File: programacao_estruturada/20192_166/volume_circunferencia.py
# (repo: rogeriosilva-ifpi/teaching-tds-course, no license)
# ----------------------------------------------------------------------
# input
raio = int(input('Raio: '))
# processing
pi = 3.14
# a sphere's volume is (4/3) * pi * r**3; the cube was missing in the original line
volume = (4 * pi * raio ** 3) / 3
# output
print('Volume:', volume)

# ----------------------------------------------------------------------
# File: aliennor-backend copy/aliennorDjangoBackend/aliennorDjangoBackend/settings.py
# (repo: phamcong/aliennor-platform, no license)
# ----------------------------------------------------------------------
"""
Django settings for aliennorDjangoBackend project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import base64
import sys
from urllib import parse
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sqqh)7k)(q1jl7t(1^em(_1c*!2_tf(d66s79vhn_*qd21gx&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False if 'DATABASE_URL' in os.environ else True
ALLOWED_HOSTS = [
'localhost'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ecocases',
'rest_framework',
'crispy_forms',
'tinymce',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aliennorDjangoBackend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aliennorDjangoBackend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Setup for MySQL connection
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'aliennor',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CSRF_COOKIE_SECURE = False
CSRF_TRUSTED_ORIGINS = ['django-angular2-movies.firebaseapp.com']
# custom settings
JWT_SECRET = base64.b64encode(b'ScaredCherriesEatSurelySimpleVulcansParticipateIntensely')
# heroku database settings
# Register database schemes in URLs.
parse.uses_netloc.append('mysql')
try:
# Check to make sure DATABASES is set in settings.py file.
# If not default to {}
if 'DATABASES' not in locals():
DATABASES = {}
if 'DATABASE_URL' in os.environ:
url = parse.urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
except Exception:
print('Unexpected error:', sys.exc_info())
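# Example of what the block above parses (hypothetical URL):
#   DATABASE_URL=mysql://user:secret@db.example.com:3306/appdb
#   -> NAME='appdb', USER='user', PASSWORD='secret', HOST='db.example.com', PORT=3306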
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'localhost:4200'
)

# ----------------------------------------------------------------------
# File: Python/ReverseString.py  (repo: ani03sha/potd, license: MIT)
# ----------------------------------------------------------------------
"""
Given a string, reverse all of its characters and return the resulting string.
"""
class ReverseString:
    def reverseString(self, s: str) -> str:
# Reversed string
reversedString = ""
# Loop for all characters in the string
for i in s:
reversedString = i + reversedString
return reversedString
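    # Note: concatenating one character at a time is O(n^2); the slicing idiom
    # s[::-1] reverses a string in O(n) and is the usual Python alternative.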
if __name__ == "__main__":
r = ReverseString()
print(r.reverseString("Cat"))
print(r.reverseString("Program of the day"))
print(r.reverseString("red quark"))
print(r.reverseString("level"))
print(r.reverseString(""))

# ----------------------------------------------------------------------
# File: .history/src/data/data_20191018141356.py
# (repo: bkraft4257/kaggle_titanic, license: MIT)
# ----------------------------------------------------------------------
import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(self, filename: Union[str, Path], drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.Xy = None
self.extract_raw()
self.Xy = self.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
def extract_raw(self):
"""
Extracts data from a CSV file.
        Returns:
            pd.DataFrame -- the raw data with normalized column names, indexed by passengerid
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={'age':'age_known'})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
    def extract_cabin_prefix(self):
        # note: operate on self.Xy (the original referenced an undefined bare name Xy)
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract(r'(\d+)$')
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract(r'^(.+) ')
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
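    # Example (illustrative): "Braund, Mr. Owen Harris" yields the title "Mr"
    # after the translation table and the trailing-dot strip above.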
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)
def estimate_age(in_df, groupby=['sex', 'title']):
    # use the groupby argument rather than a hard-coded column list
    Xy_age_estimate = in_df.groupby(groupby).age_known.mean().to_frame().round(1)
Xy_age_estimate = Xy_age_estimate.rename(columns ={'age_known':'age_estimate'})
out_df = in_df.reset_index().merge(Xy_age_estimate, on=['sex', 'title'])
out_df['age'] = out_df['age_known'].fillna(out_df['age_estimate'])
    return out_df

# ----------------------------------------------------------------------
# File: VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/twisted/cred/portal.py
# (repo: baojunli/FastCAE, licenses: BSD-3-Clause, MIT, unknown-reference)
# ----------------------------------------------------------------------
# -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
"""
The realm connects application-specific objects to the
authentication system.
"""
def requestAvatar(avatarId, mind, *interfaces):
"""
Return avatar which provides one of the given interfaces.
@param avatarId: a string that identifies an avatar, as returned by
L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
(via a Deferred). Alternatively, it may be
C{twisted.cred.checkers.ANONYMOUS}.
@param mind: usually None. See the description of mind in
L{Portal.login}.
@param interfaces: the interface(s) the returned avatar should
implement, e.g. C{IMailAccount}. See the description of
L{Portal.login}.
@returns: a deferred which will fire a tuple of (interface,
avatarAspect, logout), or the tuple itself. The interface will be
one of the interfaces passed in the 'interfaces' argument. The
'avatarAspect' will implement that interface. The 'logout' object
is a callable which will detach the mind from the avatar.
"""
class Portal:
"""
A mediator between clients and a realm.
A portal is associated with one Realm and zero or more credentials checkers.
When a login is attempted, the portal finds the appropriate credentials
checker for the credentials given, invokes it, and if the credentials are
valid, retrieves the appropriate avatar from the Realm.
This class is not intended to be subclassed. Customization should be done
in the realm object and in the credentials checker objects.
"""
def __init__(self, realm, checkers=()):
"""
Create a Portal to a L{IRealm}.
"""
self.realm = realm
self.checkers = {}
for checker in checkers:
self.registerChecker(checker)
def listCredentialsInterfaces(self):
"""
Return list of credentials interfaces that can be used to login.
"""
return self.checkers.keys()
def registerChecker(self, checker, *credentialInterfaces):
if not credentialInterfaces:
credentialInterfaces = checker.credentialInterfaces
for credentialInterface in credentialInterfaces:
self.checkers[credentialInterface] = checker
def login(self, credentials, mind, *interfaces):
"""
@param credentials: an implementor of
L{twisted.cred.credentials.ICredentials}
@param mind: an object which implements a client-side interface for
your particular realm. In many cases, this may be None, so if the
word 'mind' confuses you, just ignore it.
@param interfaces: list of interfaces for the perspective that the mind
wishes to attach to. Usually, this will be only one interface, for
example IMailAccount. For highly dynamic protocols, however, this
may be a list like (IMailAccount, IUserChooser, IServiceInfo). To
expand: if we are speaking to the system over IMAP, any information
that will be relayed to the user MUST be returned as an
IMailAccount implementor; IMAP clients would not be able to
understand anything else. Any information about unusual status
would have to be relayed as a single mail message in an
otherwise-empty mailbox. However, in a web-based mail system, or a
PB-based client, the ``mind'' object inside the web server
(implemented with a dynamic page-viewing mechanism such as a
Twisted Web Resource) or on the user's client program may be
intelligent enough to respond to several ``server''-side
interfaces.
@return: A deferred which will fire a tuple of (interface,
avatarAspect, logout). The interface will be one of the interfaces
passed in the 'interfaces' argument. The 'avatarAspect' will
implement that interface. The 'logout' object is a callable which
will detach the mind from the avatar. It must be called when the
user has conceptually disconnected from the service. Although in
some cases this will not be in connectionLost (such as in a
web-based session), it will always be at the end of a user's
interactive session.
"""
for i in self.checkers:
if i.providedBy(credentials):
return maybeDeferred(self.checkers[i].requestAvatarId, credentials
).addCallback(self.realm.requestAvatar, mind, *interfaces
)
ifac = providedBy(credentials)
return defer.fail(failure.Failure(error.UnhandledCredentials(
"No checker for %s" % ', '.join(map(reflect.qual, ifac)))))

# ----------------------------------------------------------------------
# File: conf/settings.py  (repo: chenrun666/FR-, no license)
# ----------------------------------------------------------------------
# Test environment
TEST = True
CHOOSESITE = False
# ๅๅกซ็ปๆ
result = {
"accountPassword":"",
"accountType":"",
"accountUsername":"",
"cardName": "",
"cardNumber": "",
"checkStatus": True,
"clientType": "",# ่ทๅ็ๅฎขๆท็ซฏ็
"createTaskStatus": True,
"linkEmail": "",
"linkEmailPassword": "",
"linkPhone": "",
"machineCode": "58.57.62.229:20181",# ่ทๅๅฎขๆท็ซฏip
"nameList": [],# ๅฆๆๆฏๆไนๅฎขๅๅผๅบ๏ผnameList้ๆพๆฌๆฌก่ทๅๆๅ็ไนๅฎขๅงๅ๏ผๅไธชไนๆฏ้ๅ
"payTaskId": 0,
"pnr": "ZTY2TG",# ่ทๅๆๅ็pnr
"price": 0.00, # ๆฏไป็ๆบ็ฅจๅซ็จๆปไปท
"baggagePrice":0.00,# ๆฏไป่กๆๆปไปท
"sourceCur": "CNY",
"errorMessage":"",
"status": 0, # 350 ไฟ็ๆๅ๏ผ301 ไฟ็ๅคฑ่ดฅ๏ผ 450 ๆฏไปๆๅ ๏ผ401 ๆฏไปๅคฑ่ดฅ
"targetCur": "MYR",
"promo":"ไฝฟ็จ็ไผๆ ็ ",
"creditEmail":"ไฟก็จ่ดฆๅท้ฎ็ฎฑ",
"creditEmailCost":"ไฟก็จ่ดฆๅท่ฑ่ดน",
}
bookStatus = {
    "BookingFail" : 301,  # 301, "booking failed"
    "PriceVerifyFail" : 340,  # 340, "price check failed, move on to the next rule"
    "BookingSuccess" : 350,  # 350, "booking succeeded"
    "PayFail" : 401,  # 401, "payment failed"
    "PayFailAfterSubmitCard" : 440,  # 440, "failed after submitting the card number"
    "PaySuccess" : 450  # 450, "payment succeeded"
}

# ----------------------------------------------------------------------
# File: back_cursor/S-scrapy/zhilianspider2/zhilianspider2/settings.py
# (repo: laomu/py_1709, no license)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Scrapy settings for zhilianspider2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'zhilianspider2'
SPIDER_MODULES = ['zhilianspider2.spiders']
NEWSPIDER_MODULE = 'zhilianspider2.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhilianspider2 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhilianspider2.middlewares.Zhilianspider2SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhilianspider2.middlewares.Zhilianspider2DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'zhilianspider2.pipelines.Zhilianspider2Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
4d74b892e84c8743de61cae4f7390681d20d20af | 496b6f92d62999ee88a8a1ff6dfe64285ec3fc56 | /ayush_crowdbotics_211/urls.py | 21b8756b708b4532e4e76355dd3f3bcf448d6d08 | [] | no_license | payush/ayush-crowdbotics-211 | cab04122c6c605d1fa6993630cd81dd39a81e1f5 | 72ddd9b7d506faa430215575913fb09834051a63 | refs/heads/master | 2020-03-23T10:19:23.756211 | 2018-07-18T13:16:00 | 2018-07-18T13:16:00 | 141,437,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """ayush_crowdbotics_211 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
307f587d31cc07e174370678eb26c5487377e342 | 157cf9d7327499d86162eb0170684f4b02a9804a | /scrapylib/proxy.py | 8f19e099c4210fe2780818d83ace596d2a39f9ed | [] | no_license | alepharchives/scrapylib | 8f59f6f1abe075adb49fbd28a6f575851cab3099 | 9d84cca95952a19d85c3229df7105502649d99e0 | refs/heads/master | 2021-01-24T01:58:45.568968 | 2012-11-07T20:13:38 | 2012-11-07T20:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | import base64
from urllib import unquote
from urllib2 import _parse_proxy
from urlparse import urlunparse
from scrapy.conf import settings
class SelectiveProxyMiddleware(object):
"""A middleware to enable http proxy to selected spiders only.
Settings:
HTTP_PROXY -- proxy uri. e.g.: http://user:[email protected]:port
PROXY_SPIDERS -- all requests from these spiders will be routed
through the proxy
"""
def __init__(self):
self.proxy = self.parse_proxy(settings.get('HTTP_PROXY'), 'http')
self.proxy_spiders = set(settings.getlist('PROXY_SPIDERS', []))
def parse_proxy(self, url, orig_type):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
else:
creds = None
return creds, proxy_url
def process_request(self, request, spider):
if spider.name in self.proxy_spiders:
creds, proxy = self.proxy
request.meta['proxy'] = proxy
if creds:
request.headers['Proxy-Authorization'] = 'Basic ' + creds
| [
"[email protected]"
] | |
3baf8e9914792b2d398347aa85e55b038c491263 | 89fea7d230e282b3bd3cf2d7db1b003e572e6fa8 | /genconf/views.py | 34042b1fcf0916e223d81ec77069c612f784a390 | [] | no_license | madron/genconf | 45abaf66010944e2df9ca1bdaa32328267c62584 | 99c7d82d55b5075299940adfeaff903b0c70bc8b | refs/heads/master | 2020-12-04T11:51:16.029577 | 2016-01-15T09:35:01 | 2016-01-15T09:35:01 | 231,754,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.views.generic import DetailView
from . import models
class ConfigurationView(DetailView):
http_method_names = ['get']
model = models.Router
template_name = 'genconf/configuration/configuration.txt'
| [
"[email protected]"
] | |
78f02da277f4298af340f876baf26f6f0f8ce38a | 9b2255e0a474555d8a4d90f586e280d40224a181 | /apps/common/urls.py | b60c575f1aeced024158b7e6f3fbde0719d8e8eb | [] | no_license | rogeriofalcone/redirector | 85f496f7c3a3c755b2d9f86f90d25ace783842e4 | 8255be80ce4e3245317864dcc580a1ef68a7c244 | refs/heads/master | 2020-04-08T07:03:19.053680 | 2012-08-12T19:13:35 | 2012-08-12T19:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | from django.conf.urls.defaults import patterns, url
from django.views.generic.simple import direct_to_template
from django.conf import settings
urlpatterns = patterns('common.views',
url(r'^about/$', direct_to_template, {'template': 'about.html'}, 'about_view'),
url(r'^changelog/$', 'changelog_view', (), 'changelog_view'),
url(r'^license/$', 'license_view', (), 'license_view'),
url(r'^password/change/done/$', 'password_change_done', (), name='password_change_done'),
url(r'^object/multiple/action/$', 'multi_object_action_view', (), name='multi_object_action_view'),
url(r'^user/$', 'current_user_details', (), 'current_user_details'),
url(r'^user/edit/$', 'current_user_edit', (), 'current_user_edit'),
url(r'^login/$', 'login_view', (), name='login_view'),
)
urlpatterns += patterns('',
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/top_redirect/'}, name='logout_view'),
url(r'^password/change/$', 'django.contrib.auth.views.password_change', {'template_name': 'password_change_form.html', 'post_change_redirect': '/password/change/done/'}, name='password_change_view'),
url(r'^password/reset/$', 'django.contrib.auth.views.password_reset', {'email_template_name': 'password_reset_email.html', 'template_name': 'password_reset_form.html', 'post_reset_redirect': '/password/reset/done'}, name='password_reset_view'),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm', {'template_name': 'password_reset_confirm.html', 'post_reset_redirect': '/password/reset/complete/'}, name='password_reset_confirm_view'),
url(r'^password/reset/complete/$', 'django.contrib.auth.views.password_reset_complete', {'template_name': 'password_reset_complete.html'}, name='password_reset_complete_view'),
url(r'^password/reset/done/$', 'django.contrib.auth.views.password_reset_done', {'template_name': 'password_reset_done.html'}, name='password_reset_done_view'),
# (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '%s%s' % (settings.STATIC_URL, 'images/favicon.ico')}),
)
urlpatterns += patterns('',
url(r'^set_language/$', 'django.views.i18n.set_language', name='set_language'),
)
| [
"[email protected]"
] | |
0b66f6ab3a183691b0308c29dabfc36e889109a5 | 997c82f5d9684945fb2f5d5481dc4d251a93755f | /famapy/core/operations/products.py | 70f4b1107b28b43b64401baecdbb3814a6c47f11 | [] | no_license | jmhorcas/famapy-aafms | a6e45b5fff2c820037daf95151df5bc6895b1611 | bcc80f7061bed4d6bfd536f9d53cf195bffa01e6 | refs/heads/main | 2023-08-24T05:51:47.337325 | 2021-10-15T10:18:20 | 2021-10-15T10:18:20 | 389,559,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from abc import abstractmethod
from typing import Any
from famapy.core.operations import Operation
class Products(Operation):
@abstractmethod
def __init__(self) -> None:
pass
@abstractmethod
def get_products(self) -> list[Any]:
pass
| [
"[email protected]"
] | |
8fd4193624ed4b3ec5193cdc4ac863af4ddabfdf | 50b77b527b95659c6ac8484a1091a70b4ad25d73 | /2019/19/aoc19.py | fdfbb0fd8a49953dc81279bd988da641306ef860 | [] | no_license | cjuub/advent-of-code | d3a4569dd0b7bf7e10dc6a76a1ffe569df4e93a2 | bb92d8ae96cde8c3e57abed26019e692fa6e168f | refs/heads/master | 2023-01-10T00:32:56.847184 | 2023-01-02T20:46:57 | 2023-01-02T20:46:57 | 160,243,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,337 | py | #!/usr/bin/env python3
from typing import List
class IntCodeComputer:
OP_ADD = 1
OP_MUL = 2
OP_LOAD = 3
OP_STORE = 4
OP_JUMP_IF_TRUE = 5
OP_JUMP_IF_FALSE = 6
OP_LESS_THAN = 7
OP_EQUALS = 8
OP_REL_BASE = 9
OP_HALT = 99
class HaltException(Exception):
pass
def __init__(self, memory: List[int]):
self._memory = memory[:] + [0] * 100000
self._pc = 0
self._input = []
self._inputs_read = 0
self._output = 0
self._rel_base = 0
self._instructions = {IntCodeComputer.OP_ADD: self._add,
IntCodeComputer.OP_MUL: self._mul,
IntCodeComputer.OP_LOAD: self._load,
IntCodeComputer.OP_STORE: self._store,
IntCodeComputer.OP_JUMP_IF_TRUE: self._jump_if_true,
IntCodeComputer.OP_JUMP_IF_FALSE: self._jump_if_false,
IntCodeComputer.OP_LESS_THAN: self._less_than,
IntCodeComputer.OP_EQUALS: self._equals,
IntCodeComputer.OP_REL_BASE: self._change_rel_base}
def _add(self, op1, op2, res):
self._memory[res] = op1 + op2
self._pc += 4
def _mul(self, op1, op2, res):
self._memory[res] = op1 * op2
self._pc += 4
def _load(self, op1, op2, res):
self._memory[op1] = self._input[self._inputs_read]
self._inputs_read += 1
self._pc += 2
def _store(self, op1, op2, res):
self._output = op1
self._pc += 2
return self._output
def _jump_if_true(self, op1, op2, res):
if op1 != 0:
self._pc = op2
else:
self._pc += 3
def _jump_if_false(self, op1, op2, res):
if op1 == 0:
self._pc = op2
else:
self._pc += 3
def _less_than(self, op1, op2, res):
if op1 < op2:
self._memory[res] = 1
else:
self._memory[res] = 0
self._pc += 4
def _equals(self, op1, op2, res):
if op1 == op2:
self._memory[res] = 1
else:
self._memory[res] = 0
self._pc += 4
def _change_rel_base(self, op1, op2, res):
self._rel_base += op1
self._pc += 2
def execute(self) -> int:
while True:
op_code_str = str(self._memory[self._pc]).rjust(5, '0')
op_code = int(op_code_str[-2:])
op1_mode = int(op_code_str[2])
op2_mode = int(op_code_str[1])
op3_mode = int(op_code_str[0])
if op_code == IntCodeComputer.OP_HALT:
raise IntCodeComputer.HaltException(self._output)
if op1_mode == 0:
# Only instruction with write on op1
if op_code == IntCodeComputer.OP_LOAD:
op1 = self._memory[self._pc + 1]
else:
op1 = self._memory[self._memory[self._pc + 1]]
elif op1_mode == 1:
op1 = self._memory[self._pc + 1]
else:
if op_code == IntCodeComputer.OP_LOAD:
op1 = self._rel_base + self._memory[self._pc + 1]
else:
op1 = self._memory[self._rel_base + self._memory[self._pc + 1]]
if op2_mode == 0:
op2 = self._memory[self._memory[self._pc + 2]]
elif op2_mode == 1:
op2 = self._memory[self._pc + 2]
else:
op2 = self._memory[self._rel_base + self._memory[self._pc + 2]]
if op3_mode == 0:
res = self._memory[self._pc + 3]
elif op3_mode == 1:
res = self._pc + 3
else:
res = self._rel_base + self._memory[self._pc + 3]
ret = self._instructions[op_code](op1, op2, res)
if ret is not None:
return int(ret)
def set_input(self, value):
self._input = value
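# Minimal usage sketch (hypothetical 3-cell program): opcode 104 outputs an
# immediate value and 99 halts, so IntCodeComputer([104, 7, 99]).execute()
# returns 7 and the next execute() call raises IntCodeComputer.HaltException.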
with open('input.txt') as fp:
code = list(map(int, fp.readline().strip().split(",")))
grid = [['' for y3 in range(100)] for x3 in range(100)]
cnt = 0
for y2 in range(50):
for x2 in range(50):
x = 0
y = 0
comp = IntCodeComputer(code)
comp.set_input([x2, y2])
try:
while True:
out = comp.execute()
grid[y2][x2] = out
if out == 1:
cnt += 1
except IntCodeComputer.HaltException:
pass
for y2 in range(50):
for x2 in range(50):
print(grid[y2][x2], end='')
print()
print('Part 1: ' + str(cnt))
grid = [['.' for y3 in range(200)] for x3 in range(100)]
for y2 in range(100):
for x2 in range(200):
comp = IntCodeComputer(code)
# lol, found it by trial and error and manual binary search
comp.set_input([x2 + 650, y2 + 1097])
try:
while True:
out = comp.execute()
if out == 1:
grid[y2][x2] = '#'
except IntCodeComputer.HaltException:
pass
for y2 in range(100):
for x2 in range(200):
print(grid[y2][x2], end='')
print()
print('Part 2: ' + str((650 + 17) * 10000 + 1097)) | [
"[email protected]"
] | |
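The interpreter above packs each instruction into up to five digits: the two low digits select the operation and the remaining digits give each operand's addressing mode (0 = position, 1 = immediate, 2 = relative to the base). A minimal sketch of just that decoding step, separate from the full machine:

# Decoding sketch for the opcode format used by IntCodeComputer.execute.
def decode(instruction: int):
    s = str(instruction).rjust(5, '0')
    opcode = int(s[-2:])                        # which operation to run
    modes = [int(s[2]), int(s[1]), int(s[0])]   # mode of op1, op2, result
    return opcode, modes

assert decode(1002) == (2, [0, 1, 0])  # multiply; op2 is an immediate value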
990e2217fe711cd73fd1d06d8903d92f9d4bb47a | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/sklearn/tests/test_random_projection.py | 4a928196200e0a3c341825db400be0947b1e67b0 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,126 | py |
import functools
import numpy as np
import pytest
import scipy.sparse as sp
from sklearn.exceptions import DataDimensionalityWarning
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import _gaussian_random_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
all_sparse_random_matrix = [_sparse_random_matrix]
all_dense_random_matrix = [_gaussian_random_matrix]
all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
np.full((10, 10), 0.5))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert random_matrix(1, 5).shape == (1, 5)
assert random_matrix(5, 1).shape == (5, 1)
assert random_matrix(5, 5).shape == (5, 5)
assert random_matrix(1, 1).shape == (1, 1)
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
# Check basic properties of random matrix generation
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = functools.partial(random_matrix, density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
# Check some statical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = _gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = _sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert np.sqrt(s) / np.sqrt(n_components) in values
assert - np.sqrt(s) / np.sqrt(n_components) in values
if density == 1.0:
assert np.size(values) == 2
else:
assert 0. in values
assert np.size(values) == 3
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert distances_ratio.max() < 1 + eps
assert 1 - eps < distances_ratio.min()
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert rp.n_components == 'auto'
assert rp.n_components_ == 110
if RandomProjection in all_SparseRandomProjection:
assert rp.density == 'auto'
assert_almost_equal(rp.density_, 0.03, 2)
assert rp.components_.shape == (110, n_features)
projected_1 = rp.transform(data)
assert projected_1.shape == (n_samples, 110)
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert projected.shape == (n_samples, 100)
assert rp.components_.shape == (100, n_features)
assert rp.components_.nnz < 115 # close to 1% density
assert 85 < rp.components_.nnz # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
# TODO remove in 0.24
def test_deprecations():
with pytest.warns(FutureWarning, match="deprecated in 0.22"):
gaussian_random_matrix(10, 100)
with pytest.warns(FutureWarning, match="deprecated in 0.22"):
sparse_random_matrix(10, 100)
| [
"[email protected]"
] | |
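test_random_projection_embedding_quality above is checking the Johnson-Lindenstrauss guarantee: projecting to roughly 8*ln(n)/eps^2 random dimensions distorts pairwise squared distances by at most a (1 +/- eps) factor. A self-contained NumPy sketch of the same check (the sizes here are illustrative, not taken from the test):

import numpy as np

rng = np.random.default_rng(0)
n, d, eps = 20, 5000, 0.3
k = int(np.ceil(8 * np.log(n) / eps ** 2))    # crude JL target dimension
X = rng.standard_normal((n, d))
R = rng.standard_normal((d, k)) / np.sqrt(k)  # Gaussian random projection
Y = X @ R

def squared_dists(A):
    diff = A[:, None, :] - A[None, :, :]
    return (diff ** 2).sum(-1)[np.triu_indices(n, 1)]

ratio = squared_dists(Y) / squared_dists(X)
print("max relative distortion:", float(abs(ratio - 1).max()))  # typically < eps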
7f6bd6db1950bb212336a9d800d41cf1c6515222 | 27556d221db5669fd74dd57344ded4cf2942c0ae | /contact/views.py | b49b69a35b79e8d1d331c3b66fb8e75ee39ac4b8 | [] | no_license | edzen12/sendEmail | 60a9dce424ec80c41b68b1092a55259154c4e080 | eb2a8feb609d9034695674a94308ed70ea600bcd | refs/heads/master | 2023-08-25T03:36:38.538297 | 2021-10-12T08:45:11 | 2021-10-12T08:45:11 | 416,253,922 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from rest_framework import viewsets, mixins
from contact.models import Person
from contact.serializers import PersonSerializer
class PersonViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
| [
"[email protected]"
] | |
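Because PersonViewSet mixes in only CreateModelMixin, the route accepts POST and nothing else. The serializer and URL wiring it relies on are not shown in this file; a hypothetical sketch of both (field names and route prefix are guesses, not taken from the repository):

# contact/serializers.py (hypothetical)
from rest_framework import serializers
from contact.models import Person

class PersonSerializer(serializers.ModelSerializer):
    class Meta:
        model = Person
        fields = "__all__"

# project urls.py (hypothetical)
from rest_framework.routers import DefaultRouter
from contact.views import PersonViewSet

router = DefaultRouter()
router.register(r"persons", PersonViewSet)
urlpatterns = router.urls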
7a05fadb04f7095039de7c80f514ced4dad3eeb8 | 8033688716c7b120d8105fb98152467c515d7d03 | /makeScalingFunctionPlot.py | edde28e4600681d9c21512f107d64dfddf7b3b24 | [] | no_license | jonathon-langford/EFT-Fitter | 68214a8f46e9817dc7add99d16e3260ae5d1617d | 1cebdef80497bb66ac2d262e2347c4d8100f94b8 | refs/heads/master | 2023-05-20T06:51:21.341971 | 2021-02-12T19:35:12 | 2021-02-12T19:35:12 | 338,414,103 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,914 | py | import os, sys
import json
import re
from optparse import OptionParser
from collections import OrderedDict as od
from importlib import import_module
import pickle
import ROOT
import numpy as np
def get_options():
parser = OptionParser()
parser.add_option('--pois', dest='pois', default='params.HEL', help="Name of json file storing pois")
parser.add_option('--functions', dest='functions', default='functions.HEL_STXS', help="Name of json file storing functions")
parser.add_option('--inputs', dest='inputs', default='', help="Comma separated list of input files")
parser.add_option("--translateBins", dest="translateBins", default=None, help="Translate STXS bins")
return parser.parse_args()
(opt,args) = get_options()
# Functions for translations
def Translate(name, ndict):
return ndict[name] if name in ndict else name
def LoadTranslations(jsonfilename):
with open(jsonfilename) as jsonfile:
return json.load(jsonfile)
translateBins = {} if opt.translateBins is None else LoadTranslations(opt.translateBins)
# Load parameters of interest
pois = import_module(opt.pois).pois
# Load functions
functions = import_module(opt.functions).functions
# Load input measurements
inputs = []
for i in opt.inputs.split(","):
_cfg = import_module(i)
_input = od()
_input['name'] = _cfg.name
_input['X'] = _cfg.X
_input['rho'] = _cfg.rho
inputs.append(_input)
from tools.fitter import *
fit = fitter(pois,functions,inputs,False)
#stxs_bins = ['ttH']
stxs_bins = ['ZH_lep_PTV_0_75','ZH_lep_PTV_75_150','ZH_lep_PTV_150_250_0J','ZH_lep_PTV_150_250_GE1J','ZH_lep_PTV_GT250','ZH_lep']
scaling = od()
for stxs_bin in stxs_bins:
scaling[stxs_bin] = od()
for poi in pois.keys(): scaling[stxs_bin][poi] = od()
# Quadratic
fit.setLinearOnly(False)
for poi in pois.keys():
scaling[stxs_bin][poi]['quad'] = od()
c, mu = fit.scaling1D(poi,stxs_bin,npoints=1000)
scaling[stxs_bin][poi]['quad']['c'] = c
scaling[stxs_bin][poi]['quad']['mu'] = mu
# Linear
fit.setLinearOnly()
for poi in pois.keys():
scaling[stxs_bin][poi]['lin'] = od()
c,mu = fit.scaling1D(poi,stxs_bin,npoints=1000)
scaling[stxs_bin][poi]['lin']['c'] = c
scaling[stxs_bin][poi]['lin']['mu'] = mu
# Make graphs
grs = od()
for stxs_bin in stxs_bins:
for poi in pois.keys():
grs['%s_vs_%s_quad'%(stxs_bin,poi)] = ROOT.TGraph()
grs['%s_vs_%s_lin'%(stxs_bin,poi)] = ROOT.TGraph()
for i in range(len(scaling[stxs_bin][poi]['quad']['c'])): grs['%s_vs_%s_quad'%(stxs_bin,poi)].SetPoint( grs['%s_vs_%s_quad'%(stxs_bin,poi)].GetN(),scaling[stxs_bin][poi]['quad']['c'][i], scaling[stxs_bin][poi]['quad']['mu'][i] )
for i in range(len(scaling[stxs_bin][poi]['lin']['c'])): grs['%s_vs_%s_lin'%(stxs_bin,poi)].SetPoint( grs['%s_vs_%s_lin'%(stxs_bin,poi)].GetN(),scaling[stxs_bin][poi]['lin']['c'][i], scaling[stxs_bin][poi]['lin']['mu'][i] )
# Make plot
styleMap = od()
styleMap['quad'] = {'LineWidth':3,'LineStyle':1,'MarkerSize':0}
styleMap['quad_dummy'] = {'LineWidth':3,'LineStyle':1,'MarkerSize':0}
styleMap['lin'] = {'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
styleMap['lin_dummy'] = {'LineColor':12, 'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
#styleMap['lin_dummy'] = {'LineColor':ROOT.kMagenta-7, 'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
colorMap = od()
colorMap['ZH_lep'] = {'LineColor':ROOT.kRed-4,'MarkerColor':ROOT.kRed-4}
colorMap['ZH_lep_PTV_0_75'] = {'LineColor':ROOT.kGreen-8,'MarkerColor':ROOT.kGreen-8}
colorMap['ZH_lep_PTV_75_150'] = {'LineColor':ROOT.kGreen-7,'MarkerColor':ROOT.kGreen-7}
colorMap['ZH_lep_PTV_150_250_0J'] = {'LineColor':ROOT.kGreen+1,'MarkerColor':ROOT.kGreen+1}
colorMap['ZH_lep_PTV_150_250_GE1J'] = {'LineColor':ROOT.kGreen+3,'MarkerColor':ROOT.kGreen+3}
colorMap['ZH_lep_PTV_GT250'] = {'LineColor':ROOT.kBlack,'MarkerColor':ROOT.kBlack}
colorMap['ttH'] = {'LineColor':ROOT.kMagenta-7,'MarkerColor':ROOT.kMagenta-7}
# POI str
poi = "cWWMinuscB"
hmax = 2.5
import math
m = "%g"%math.log(1/pois[poi]['multiplier'],10)
if m == '1': m = ''
if poi == "cWWMinuscB":
pstr_stripped = "c_{WW} #minus c_{B}"
pstr = "(c_{WW} #minus c_{B}) x 10^{%s}"%m
else:
pstr_stripped = "c_{%s}"%poi.split("c")[-1]
pstr = "c_{%s} x 10^{%s}"%(poi.split("c")[-1],m)
ROOT.gROOT.SetBatch(True)
ROOT.gStyle.SetOptStat(0)
canv = ROOT.TCanvas("canv_%s"%poi,"canv_%s"%poi,700,500)
#canv = ROOT.TCanvas("canv_%s"%poi,"canv_%s"%poi,900,500)
canv.SetBottomMargin(0.15)
canv.SetTickx()
canv.SetTicky()
prange = pois[poi]['range'][1]-pois[poi]['range'][0]
h_axes = ROOT.TH1F("haxes","",100, pois[poi]['range'][0]-0.1*prange, pois[poi]['range'][1]+0.1*prange )
h_axes.SetMaximum(hmax)
h_axes.SetMinimum(-0.2)
h_axes.SetTitle("")
h_axes.GetXaxis().SetTitle(pstr)
h_axes.GetXaxis().SetTitleSize(0.05)
h_axes.GetXaxis().SetLabelSize(0.035)
h_axes.GetYaxis().SetTitle("#mu^{i}_{prod}(%s)"%pstr_stripped)
h_axes.GetYaxis().SetTitleSize(0.05)
h_axes.GetYaxis().SetTitleOffset(0.8)
h_axes.GetYaxis().SetLabelSize(0.035)
h_axes.GetYaxis().SetLabelOffset(0.007)
h_axes.GetYaxis().CenterTitle()
h_axes.SetLineWidth(0)
h_axes.Draw()
for stxs_bin in stxs_bins:
    for k, v in colorMap[stxs_bin].items():
        getattr(grs["%s_vs_%s_quad"%(stxs_bin,poi)],"Set%s"%k)(v)
        getattr(grs["%s_vs_%s_lin"%(stxs_bin,poi)],"Set%s"%k)(v)
    for k, v in styleMap['quad'].items(): getattr(grs["%s_vs_%s_quad"%(stxs_bin,poi)],"Set%s"%k)(v)
    for k, v in styleMap['lin'].items(): getattr(grs["%s_vs_%s_lin"%(stxs_bin,poi)],"Set%s"%k)(v)
grs["%s_vs_%s_quad"%(stxs_bin,poi)].Draw("Same C")
grs["%s_vs_%s_lin"%(stxs_bin,poi)].Draw("Same C")
# Lines
hlines = {}
yvals = [0,1]
for i in range(len(yvals)):
yval = yvals[i]
hlines['hline_%g'%i] = ROOT.TLine(pois[poi]['range'][0]-0.1*prange,yval,pois[poi]['range'][1]+0.1*prange,yval)
hlines['hline_%g'%i].SetLineColorAlpha(15,0.5)
hlines['hline_%g'%i].SetLineStyle(2)
hlines['hline_%g'%i].SetLineWidth(1)
hlines['hline_%g'%i].Draw("SAME")
vlines = {}
xvals = [pois[poi]['range'][0],0,pois[poi]['range'][1]]
for i in range(len(xvals)):
xval = xvals[i]
vlines['vline_%g'%i] = ROOT.TLine(xval,-0.2,xval,hmax)
vlines['vline_%g'%i].SetLineColorAlpha(15,0.5)
vlines['vline_%g'%i].SetLineStyle(2)
vlines['vline_%g'%i].SetLineWidth(1)
vlines['vline_%g'%i].Draw("SAME")
# Text
lat0 = ROOT.TLatex()
lat0.SetTextFont(42)
lat0.SetTextAlign(11)
lat0.SetNDC()
lat0.SetTextSize(0.045)
lat0.DrawLatex(0.1,0.92,"HEL UFO")
lat1 = ROOT.TLatex()
lat1.SetTextFont(42)
lat1.SetTextAlign(23)
lat1.SetTextSize(0.03)
xpos = pois[poi]['range'][0]-0.05*prange
lat1.DrawLatex(xpos,1.,"#color[15]{#sigma = #sigma_{SM}}")
lat1.DrawLatex(xpos,0.,"#color[15]{#sigma = 0}")
lat2 = ROOT.TLatex()
lat2.SetTextFont(42)
lat2.SetTextAlign(23)
lat2.SetTextAngle(90)
lat2.SetTextSize(0.045)
lat2.SetTextAlign(21)
lat2.DrawLatex(pois[poi]['range'][0]-0.02*prange,0.9*hmax,"#color[15]{c_{min}}")
lat2.SetTextAlign(23)
lat2.DrawLatex(pois[poi]['range'][1]+0.01*prange,0.9*hmax,"#color[15]{c_{max}}")
# Legend
# Create dummy graph for linear
gr_lin_dummy = ROOT.TGraph()
for k,v in styleMap['lin_dummy'].items(): getattr(gr_lin_dummy,"Set%s"%k)(v)
leg = ROOT.TLegend(0.55,0.22,0.8,0.48)
#leg = ROOT.TLegend(0.63,0.28,0.8,0.38)
leg.SetFillStyle(0)
leg.SetLineColor(0)
leg.SetTextSize(0.0275)
#leg.SetTextSize(0.035)
for stxs_bin in stxs_bins: leg.AddEntry( grs["%s_vs_%s_quad"%(stxs_bin,poi)], Translate(stxs_bin,translateBins), "L")
leg.AddEntry(gr_lin_dummy,"(Lin. terms only)","L")
leg.Draw("Same")
canv.Update()
canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ZH_lep_vs_%s.png"%poi)
canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ZH_lep_vs_%s.pdf"%poi)
#canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ttH_vs_%s.png"%poi)
#canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ttH_vs_%s.pdf"%poi)
| [
"[email protected]"
] | |
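The curves drawn by scaling1D are signal strengths mu(c), which for a single dimension-6 operator are at most quadratic in the Wilson coefficient: mu(c) = 1 + A*c + B*c^2, with the dashed "linear terms only" variant dropping the B*c^2 piece. A minimal sketch with made-up A and B (the real coefficients come from the imported functions module):

import numpy as np

def mu(c, A, B, linear_only=False):
    # 1 (SM) + interference term + optional pure-BSM quadratic term
    return 1.0 + A * c + (0.0 if linear_only else B * c * c)

c = np.linspace(-1.0, 1.0, 5)
print(mu(c, A=0.8, B=2.5))                      # full quadratic scaling
print(mu(c, A=0.8, B=2.5, linear_only=True))    # linearised version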
e12d6762e76b184388535c634d9a135c76ad728f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02400/s312694229.py | 0e3f00ee5086355e387498233f562d240a9c9fa5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import sys
import math
def main():
r = float(sys.stdin.readline())
print(math.pi * r**2, 2 * math.pi * r)
return
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a950c831386ddc3bd2049db23e626695c198bb3a | 6b5572557c4a0785c4b727ee024790ec066ad6f2 | /Baekjoon/๋์ ๊ณํ๋ฒ 1/1, 2, 3 ๋ํ๊ธฐ.py | c018911cffa41fdf3bdb10a60894fd54e9209e12 | [] | no_license | easternpillar/AlgorithmTraining | 5be38998dc062d1d02933f61eaca3265e1b73981 | c8f05eda86161a7dbacab99154be1af292e7db8a | refs/heads/master | 2023-04-29T11:13:34.984005 | 2023-04-08T07:12:29 | 2023-04-08T07:12:29 | 231,875,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Problem:
# Reference: https://www.acmicpc.net/problem/9095
# My Solution:
import sys
dp = [0 for _ in range(11)]
dp[0] = 1
for i in range(1, 4, 1):
for j in range(1,11):
tot = 0
for k in range(j - i, j, 1):
if k >= 0:
tot += dp[k]
dp[j] = tot
for _ in range(int(sys.stdin.readline().rstrip())):
num = int(sys.stdin.readline().rstrip())
print(dp[num])
| [
"[email protected]"
] | |
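The triple loop above builds the same table as the usual one-pass recurrence for ordered sums of 1, 2 and 3: dp[n] = dp[n-1] + dp[n-2] + dp[n-3], seeded with dp[0] = 1. Equivalent sketch:

dp = [1, 1, 2] + [0] * 8              # dp[0]=1, dp[1]=1, dp[2]=2
for n in range(3, 11):
    dp[n] = dp[n - 1] + dp[n - 2] + dp[n - 3]
print(dp[4])                          # 7 ways to write 4 with parts 1, 2, 3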
3fae578b5162e7f5acb831c405be63172c98b6df | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/300a07a56fa3435d495e1ce8762b25d84931bfae7c2899c2825326bcc799b818/typing/re.py | 2336d186762e5e40248962573d6c349ef6e6ffaa | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # encoding: utf-8
# module typing.re
# from C:\Users\Doly\Anaconda3\lib\site-packages\statsmodels\tsa\statespace\_representation.cp37-win_amd64.pyd
# by generator 1.147
""" Wrapper namespace for re type aliases. """
# no imports
# functions
def Match(*args, **kwargs): # real signature unknown
"""
The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
pass
def Pattern(*args, **kwargs): # real signature unknown
"""
The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
pass
# no classes
# variables with complex values
__all__ = [
'Pattern',
'Match',
]
__weakref__ = None # (!) real value is "<attribute '__weakref__' of 'typing.re' objects>"
| [
"[email protected]"
] | |
167439e261d55b4a6013812c4b86be943e29dd30 | 16e8129f7a12239ed49aabfa04549d90419bb12e | /old_explore.py | 1b2cf18e07a1f4df23eb6398cb62957b72fd8c45 | [] | no_license | Seanny123/hrl_analysis | 0a5a4ff8b672d05760e39ec5558557220d71459d | 7ccf2beea6090c1493c4dce95630ef251f9c6548 | refs/heads/master | 2020-05-18T20:59:29.098987 | 2015-01-07T23:18:56 | 2015-01-07T23:18:56 | 28,878,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | not_equal = []
# `file_list` is assumed (defined elsewhere) to hold the two sequences being
# compared element by element, e.g. the lines of two files.
count = 0
for i, j in zip(file_list[0], file_list[1]):
    if i != j:
not_equal.append(1)
count += 1
else:
not_equal.append(0) | [
"[email protected]"
] | |
48d4b9fe1ff3432fc5ef22a33a9d4014933c5d2c | 53983c1dbd4e27d918237d22287f1838ae42cc92 | /tools/txtIO.py | 03e55b006916f7db35cb202eb15e7466473f3329 | [] | no_license | xshii/MDAOXS | da5060ea6b6ac600b3b85dddbb7460f62ab4a684 | d4c54b79d7c84740bf01d8e8573e54522de2e6d0 | refs/heads/master | 2021-09-24T10:35:31.295574 | 2018-10-08T10:54:44 | 2018-10-08T10:54:44 | 108,884,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | import numpy as np
def stlTxtToNpArray(path):
mesh = np.empty([1,3])
with open(path) as fp:
for line in fp:
            if len(line) < 5:   # skip blank or malformed rows
continue
line = line.strip().split(';')
line = list(map(str.strip, line))
            if line[1].startswith('-'):
                # Repair exponents whose 'e' marker was dropped on export:
                # "-1.234-5" stands for -1.234e-5. replace() re-inserts the
                # marker; the [1:] strips the stray 'e' that the substitution
                # puts in front of the leading minus sign.
                if line[1].count('-') == 2:
                    line[1] = line[1].replace('-','e-')[1:]
                else:
                    line[1] = line[1].replace('-','e-')
mesh = np.vstack([mesh,np.array(line,dtype='float')])
mesh = mesh[1:,:]
return mesh
if __name__=='__main__':
path = r"/Users/gakki/Downloads/SU2_mesh_point_clouds/Optimale_orig_points.txt"
mesh = stlTxtToNpArray(path=path)
np.savetxt('new_mesh.txt',mesh,delimiter=';') | [
"[email protected]"
] | |
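The minus-sign counting above repairs values whose exponent marker was dropped on export, so "-1.234-5" stands for -1.234e-5. A slightly more general sketch of that repair as a standalone function (it also covers a positive mantissa with a fused exponent, a case the original branch never reaches):

def fix_fused_exponent(token: str) -> float:
    # "-1.234-5" -> -1.234e-5, "1.234-5" -> 1.234e-5, plain numbers unchanged
    sign = ''
    if token.startswith('-'):
        sign, token = '-', token[1:]
    if '-' in token:
        token = token.replace('-', 'e-')
    return float(sign + token)

print(fix_fused_exponent("-1.234-5"))   # -1.234e-05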
66c824ae5c6b6b235b3bb178b980a4953d6ba68e | b615aa786c2a57809196713920e784187b1c1cd6 | /53_finetune_crf_loss_new_mask_LOC_max_seq_128_batch_32_lr5e5_lr5e5/CRF2.py | cee3a4b4c1166c0eead2b032679d7c877b88dd3f | [] | no_license | hoon4233/KoElectra-CRF | 5ae14d883271e78fcd344b169dddf49a34789af1 | 0adf89715d1369de097160cb821f931386f2ebb0 | refs/heads/master | 2023-07-16T19:19:25.229933 | 2021-09-04T03:13:48 | 2021-09-04T03:13:48 | 402,653,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,310 | py | from typing import List, Optional
import torch
import torch.nn as nn
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282โ289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(
self,
emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'sum',
) -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError(f'invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.float().sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
Returns:
List of list containing the best tag sequence for each batch.
"""
self._validate(emissions, mask=mask)
if mask is None:
mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
f'expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(
self, emissions: torch.Tensor, tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.float()
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(
self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history = []
# score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# history saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
history.append(indices)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Now, compute the best path for each sample
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
# Find the tag which maximizes the score at the last timestep; this is our best tag
# for the last timestep
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
# We trace back where the best last tag comes from, append that to our best tag
# sequence, and trace it back again, and so on
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
# Reverse the order because we start from the last timestep
best_tags.reverse()
best_tags_list.append(best_tags)
return best_tags_list
| [
"[email protected]"
] | |
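A minimal usage sketch for the CRF module above, assuming the class is in scope (shapes are made up; with batch_first=True the inputs are (batch, seq, num_tags)). The forward pass returns a log-likelihood, so the training loss is its negation, and decode runs Viterbi:

import torch

num_tags, batch, seq = 5, 2, 7
crf = CRF(num_tags, batch_first=True)
emissions = torch.randn(batch, seq, num_tags)       # e.g. encoder logits
tags = torch.randint(num_tags, (batch, seq))
mask = torch.ones(batch, seq, dtype=torch.bool)     # every timestep valid

loss = -crf(emissions, tags, mask=mask)             # negative log-likelihood
best_paths = crf.decode(emissions, mask=mask)       # list of tag id lists
print(loss.item(), best_paths)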
1a37d2f7a618537fb84f62c141d88105e25238a2 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/ad_group_service_client_config.py | 1931ef3dfd64f9494a515ac86c2f1db21526b546 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 964 | py | config = {
"interfaces": {
"google.ads.googleads.v4.services.AdGroupService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetAdGroup": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"MutateAdGroups": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"[email protected]"
] | |
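The retry_params block encodes an exponential backoff: start at initial_retry_delay_millis, multiply by retry_delay_multiplier after each failed attempt, and cap at max_retry_delay_millis. A sketch of the delay schedule it implies (an illustration only, not how the generated client actually consumes the dict):

def backoff_delays(params, attempts=6):
    delay = params["initial_retry_delay_millis"]
    for _ in range(attempts):
        yield delay
        delay = min(delay * params["retry_delay_multiplier"],
                    params["max_retry_delay_millis"])

svc = config["interfaces"]["google.ads.googleads.v4.services.AdGroupService"]
print(list(backoff_delays(svc["retry_params"]["default"])))  # 5000, 6500.0, 8450.0, ...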
8e95de029333c6c144fe7923a72ce823d922cfcf | c2102a9f17a9e08988f367cf785eb3f5d7925854 | /backend/home/migrations/0002_load_initial_data.py | 03d33413376a79c8d6e8efbc30a313757e5182c9 | [] | no_license | crowdbotics-apps/frego-24047 | 44d1569e12748ac327867ac08cfee416ae6eef45 | b30631f14473f965604b937458aea3c7739fd170 | refs/heads/master | 2023-02-19T10:00:37.381026 | 2021-01-25T11:07:44 | 2021-01-25T11:07:44 | 332,718,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Frego"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Frego</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "frego-24047.botics.co"
site_params = {
"name": "Frego",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
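As written, the RunPython operations above cannot be reversed, so migrating this app back past this migration would fail. A hypothetical variant that makes rollback a no-op by passing migrations.RunPython.noop as the reverse callable:

# Hypothetical drop-in replacement for the operations list above.
operations = [
    migrations.RunPython(create_customtext, migrations.RunPython.noop),
    migrations.RunPython(create_homepage, migrations.RunPython.noop),
    migrations.RunPython(create_site, migrations.RunPython.noop),
]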
d6ea747b5957732d583916c70b4f80bc1cdb39b4 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/__init__.py | 414039c71bac5dac5968fee3843019053441ab0c | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-ldp-fec-prefix-prefix/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ldp_fec_prefix',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-ldp-fec-prefix-prefix', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-ldp-fec-prefix-prefix', u'input']
def _get_ldp_fec_prefix(self):
"""
Getter method for ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/ldp_fec_prefix (inet:ipv4-prefix)
YANG Description: IP address/Subnet mask length
"""
return self.__ldp_fec_prefix
def _set_ldp_fec_prefix(self, v, load=False):
"""
Setter method for ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp_fec_prefix_prefix/input/ldp_fec_prefix (inet:ipv4-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_fec_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_fec_prefix() directly.
YANG Description: IP address/Subnet mask length
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ldp_fec_prefix must be of a type compatible with inet:ipv4-prefix""",
'defined-type': "inet:ipv4-prefix",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)""",
})
self.__ldp_fec_prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_ldp_fec_prefix(self):
self.__ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="ldp-fec-prefix", rest_name="ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-prefix', is_config=True)
ldp_fec_prefix = __builtin__.property(_get_ldp_fec_prefix, _set_ldp_fec_prefix)
_pyangbind_elements = {'ldp_fec_prefix': ldp_fec_prefix, }
| [
"[email protected]"
] | |
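A usage sketch for the generated container above, assuming the pyangbind runtime and this module are importable (the generated class is literally named `input`, shadowing the builtin inside this module). The property setter validates the value against the YANG ipv4-prefix pattern and raises ValueError otherwise:

rpc_input = input()                       # the generated container class
rpc_input.ldp_fec_prefix = "10.1.1.0/24"  # accepted by the pattern check
print(rpc_input.ldp_fec_prefix)
try:
    rpc_input.ldp_fec_prefix = "not-a-prefix"
except ValueError:
    print("rejected by the YANG pattern restriction")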
ebddad0a10200abb12d5766947407ad330b1c73e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03773/s930206485.py | 73718780627fbe486462c2c936d41e242d1de545 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import sys
input = sys.stdin.readline
a,b=map(int,input().split())
print((a+b)%24)
| [
"[email protected]"
] | |
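The modulo wraps the sum onto a 24-hour clock, e.g. an event starting at a = 21 o'clock and lasting b = 7 hours ends at 4 o'clock:

assert (21 + 7) % 24 == 4   # worked example of the wrap-around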
5f4d4cbae4be77f5c897b06ef5e96f1d6c45ff12 | 4f875744ccae8fa9225318ce16fc483b7bf2735e | /amazon/missingNumber.py | 51bc4b88b741139e123462001c9eba084a29d249 | [] | no_license | nguyenngochuy91/companyQuestions | 62c0821174bb3cb33c7af2c5a1e83a60e4a29977 | c937fe19be665ba7ac345e1729ff531f370f30e8 | refs/heads/master | 2020-07-27T05:58:36.794033 | 2020-04-10T20:57:15 | 2020-04-10T20:57:15 | 208,893,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 1 23:59:19 2020
@author: huyn
"""
#Missing Number
#Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array.
from typing import List
class Solution:
def missingNumber(self, nums: List[int]) -> int:
size = len(nums)
return size*(size+1)//2-sum(nums) | [
"[email protected]"
] | |
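The Gauss-sum trick above uses O(1) extra space but can overflow in fixed-width languages; a common alternative XORs every index and value so each present number cancels against its index and only the missing one survives. Sketch:

from typing import List

def missing_number_xor(nums: List[int]) -> int:
    acc = len(nums)                  # n itself: the index with no element
    for i, v in enumerate(nums):
        acc ^= i ^ v                 # matched (index, value) pairs cancel
    return acc

assert missing_number_xor([3, 0, 1]) == 2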
e7ccb286539047d1d436e032546a3938ddce7bf1 | 0b86600e0288c0fefc081a0f428277a68b14882e | /binaire/binaire_II.py | b861b63f8209d81b2dbc50518fcc23ce46f0cebc | [] | no_license | Byliguel/python1-exo7 | 9ede37a8d2b8f384d1ebe3d612e8c25bbe47a350 | fbf6b08f4c1e94dd9f170875eee871a84849399e | refs/heads/master | 2020-09-22T10:16:34.044141 | 2019-12-01T11:52:51 | 2019-12-01T11:52:51 | 225,152,986 | 1 | 0 | null | 2019-12-01T11:51:37 | 2019-12-01T11:51:36 | null | UTF-8 | Python | false | false | 5,199 | py |
##############################
# Binary - part II
##############################
from binaire_I import *
##############################
# Activitรฉ 1 - Palindrome en binaire
##############################
## Question 1 ##
def est_palindrome_1(liste):
p = len(liste)
drapeau = True
for i in range(p):
if liste[i] != liste[p-1-i]:
drapeau = False
return drapeau
# Version optimisรฉe :
def est_palindrome_1_bis(liste):
p = len(liste)
for i in range(p//2):
if liste[i] != liste[p-1-i]:
return False
return True
def est_palindrome_2(liste):
liste_inverse = list(reversed(liste))
return liste == liste_inverse
# Test
print("--- Palindrome test ---")
liste = [1,0,1,0,0,1,0,1]
print(est_palindrome_1(liste))
print(est_palindrome_1_bis(liste))
print(est_palindrome_2(liste))
## Question 2 ##
def cherche_palindrome_binaire(N):
num = 0
for n in range(N):
liste_binaire = entier_vers_binaire(n)
if est_palindrome_1(liste_binaire) == True:
num = num + 1
print(num,":",n,"=",entier_vers_binaire(n))
return
# Test
print("--- Binary palindromes ---")
cherche_palindrome_binaire(1000)
# The 1000th binary palindrome is:
#249903 = [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1]
## Question 3 ##
def cherche_palindrome_decimale(N):
num = 0
for n in range(N):
liste_decimale = entier_vers_decimale(n)
if est_palindrome_1(liste_decimale) == True:
num = num + 1
print(num,":",n)
return
# Test
print("--- Decimal palindromes ---")
cherche_palindrome_decimale(1000)
# The 1000th decimal palindrome is:
# 90009
## Question 4 ##
def cherche_bi_palindrome(N):
num = 0
for n in range(N):
liste_binaire = entier_vers_binaire(n)
liste_decimale = entier_vers_decimale(n)
if est_palindrome_1(liste_binaire) == True and est_palindrome_1(liste_decimale):
num = num + 1
print(num,":",n,"=",entier_vers_binaire(n))
return
# Test
print("--- Bi-palindromes ---")
cherche_bi_palindrome(1000)
# The 20th bi-palindrome is
# 585585 = [1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1]
##############################
# Activitรฉ 2 - Opรฉrations logiques
##############################
## Question 1 ##
def OUeg(l1,l2):
n = len(l1)
l = []
for i in range(n):
if l1[i]==1 or l2[i]==1:
l = l + [1]
else:
l = l + [0]
return l
def ETeg(l1,l2):
n = len(l1)
l = []
for i in range(n):
if l1[i]==1 and l2[i]==1:
l = l + [1]
else:
l = l + [0]
return l
def NON(l1):
l = []
for b in l1:
if b==1:
l = l + [0]
else:
l = l + [1]
return l
# Test
print("--- Logical operations (same length) ---")
l1 = [1,0,1,0,1,0,1]
l2 = [1,0,0,1,0,0,1]
print(l1)
print(l2)
print(OUeg(l1,l2))
print(ETeg(l1,l2))
print(NON(l1))
## Question 2 ##
# Pad with non-significant leading zeros if needed
def ajouter_zeros(liste,p):
while len(liste)< p:
liste = [0] + liste
return liste
# Test
print("--- Non-significant leading zeros ---")
print(ajouter_zeros([1,0,1,1],8))
## Question 3 ##
# Opรฉrations logiques avec des listes de tailles diffรฉrentes
def OU(l1,l2):
p = len(l1)
q = len(l2)
if p>q:
ll2 = ajouter_zeros(l2,p)
return OUeg(l1,ll2)
else:
ll1 = ajouter_zeros(l1,q)
return OUeg(ll1,l2)
def ET(l1,l2):
p = len(l1)
q = len(l2)
if p>q:
ll2 = ajouter_zeros(l2,p)
return ETeg(l1,ll2)
else:
ll1 = ajouter_zeros(l1,q)
return ETeg(ll1,l2)
# Test
print("--- Logical operations (general case) ---")
l1 = [1,0,1,0,1,0,1]
l2 = [1,0,0,1,0,]
print(l1)
print(l2)
print(OU(l1,l2))
print(ET(l1,l2))
##############################
# Activitรฉ 3 - Loi de Morgan
##############################
## Question 1 ##
def tous_les_binaires(p):
liste_p = []
for n in range(2**p):
liste_p = liste_p + [entier_vers_binaire(n)]
return liste_p
# Test
print("--- All binary representations ---")
print(tous_les_binaires(3))
## Question 2 ##
def toutes_les_listes(p):
if p == 0:
return []
if p == 1:
return [[0],[1]]
liste_p_1 = toutes_les_listes(p-1)
liste_p = [ [0] + l for l in liste_p_1] + [ [1] + l for l in liste_p_1]
return liste_p
# Test
print("--- All bit lists ---")
print(toutes_les_listes(3))
## Question 3 ##
# De Morgan's laws
def test_loi_de_morgan(p):
liste_tous = [ajouter_zeros(l,p) for l in tous_les_binaires(p)]
#liste_tous = toutes_les_listes(p)
for l1 in liste_tous:
for l2 in liste_tous:
non_l1_ou_l2 = NON(OU(l1,l2))
non_l1_et_non_l2 = ET(NON(l1),NON(l2))
if non_l1_ou_l2 == non_l1_et_non_l2:
                print("True")
                # pass
            else:
                print("False",l1,l2)
return
# Test
print("--- De Morgan's law test ---")
test_loi_de_morgan(2)
| [
"[email protected]"
] | |
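The exhaustive list-based check above mirrors what Python's integer bitwise operators give directly: on two's-complement integers, De Morgan's law NOT(a OR b) == NOT(a) AND NOT(b) can be spot-checked with ~, | and &:

for a in range(8):
    for b in range(8):
        assert ~(a | b) == (~a) & (~b)   # De Morgan on plain ints
print("De Morgan holds for all pairs tested")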
5f7da7fa319f1cc207012d4a7c768df8dbb81213 | b096dbccb31d3bd181259e930816964c71034ff4 | /tests/test_asynchronous/test_task.py | 73017b8eafee20bb87f1953d01201f1490e7e409 | [] | no_license | cosphere-org/lily | b68f95720381a69ce0caa5f47fca461b3f5242a9 | f6a8281e10eedcccb86fcf3a26aaf282d91f70f4 | refs/heads/master | 2023-02-18T13:49:03.568989 | 2022-06-30T09:58:23 | 2022-06-30T09:58:23 | 175,789,374 | 6 | 0 | null | 2023-02-15T18:49:10 | 2019-03-15T09:28:05 | Python | UTF-8 | Python | false | false | 369 | py |
from unittest import TestCase
from lily.asynchronous import AsyncTask
class AsyncTaskTestCase(TestCase):
def test_init(self):
def fn():
pass
task = AsyncTask(callback=fn, args=[9, 1])
assert task.callback == fn
assert task.args == [9, 1]
assert task.successful is False
assert task.result is None
| [
"[email protected]"
] | |
3ba8f7fac04d4e7b45bfe7128eff82a0fb4248dc | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/ipu/test_gelu_op_ipu.py | 5877341afb1264b0ffe18dd0fbecc822be5d9904 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 2,151 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test_ipu import IPUOpTest
import paddle
import paddle.static
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {"approximate": False}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
out = paddle.nn.functional.gelu(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 2e-3
self.rtol_fp16 = 1e-3
def set_op_attrs(self):
self.attrs = {"approximate": True}
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
cd11f4c013bbbf9e2770dc15bde51f95098d6eac | a43cf3cacf518096737dd39833fd39624f8cf543 | /tests/test_csv_adapters.py | 071ac85a5c6f6098d645e145a468f026e11bcd6a | [
"Apache-2.0"
] | permissive | Mickey1964/antevents-python | f6ad4f9b056550055a223f7d4a7d34bc030c1dfb | 5b9226813583141986014fc83f6f74342a5f271e | refs/heads/master | 2021-06-15T11:23:56.253643 | 2017-03-31T05:25:59 | 2017-03-31T05:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,236 | py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Verify the csv reader/writer through a round trip
"""
import unittest
import time
from tempfile import NamedTemporaryFile
import os
import asyncio
import datetime
from antevents.base import Scheduler, IterableAsPublisher, SensorEvent
from antevents.adapters.csv import CsvReader, default_event_mapper
import antevents.linq.dispatch
from utils import make_test_publisher, CaptureSubscriber, \
SensorEventValidationSubscriber
NUM_EVENTS=5
class TestCases(unittest.TestCase):
def test_default_mapper(self):
"""Verify the class that maps between an event and a sensor
"""
event = SensorEvent(ts=time.time(), sensor_id=1, val=123.456)
row = default_event_mapper.event_to_row(event)
event2 = default_event_mapper.row_to_event(row)
self.assertEqual(event2, event,
"Round-tripped event does not match original event")
def test_file_write_read(self):
tf = NamedTemporaryFile(mode='w', delete=False)
tf.close()
try:
sensor = make_test_publisher(1, stop_after_events=NUM_EVENTS)
capture = CaptureSubscriber()
sensor.subscribe(capture)
sensor.csv_writer(tf.name)
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_recurring(sensor)
print("Writing sensor events to temp file")
scheduler.run_forever()
self.assertTrue(capture.completed, "CaptureSubscriber did not complete")
self.assertEqual(len(capture.events), NUM_EVENTS,
"number of events captured did not match generated events")
reader = CsvReader(tf.name)
vs = SensorEventValidationSubscriber(capture.events, self)
reader.subscribe(vs)
scheduler.schedule_recurring(reader)
print("reading sensor events back from temp file")
scheduler.run_forever()
self.assertTrue(vs.completed, "ValidationSubscriber did not complete")
finally:
os.remove(tf.name)
# data for rollover test
ROLLING_FILE1 = 'dining-room-2015-01-01.csv'
ROLLING_FILE2 = 'dining-room-2015-01-02.csv'
FILES = [ROLLING_FILE1, ROLLING_FILE2]
def make_ts(day, hr, minute):
return (datetime.datetime(2015, 1, day, hr, minute) - datetime.datetime(1970,1,1)).total_seconds()
EVENTS = [SensorEvent('dining-room', make_ts(1, 11, 1), 1),
SensorEvent('dining-room', make_ts(1, 11, 2), 2),
SensorEvent('dining-room', make_ts(2, 11, 1), 3),
SensorEvent('dining-room', make_ts(2, 11, 2), 4)]
# data for dispatch test
sensor_ids = ['dining-room', 'living-room']
ROLLING_FILE3 = 'living-room-2015-01-01.csv'
ROLLING_FILE4 = 'living-room-2015-01-02.csv'
FILES2 = [ROLLING_FILE1, ROLLING_FILE2, ROLLING_FILE3, ROLLING_FILE4]
EVENTS2 = [SensorEvent('dining-room', make_ts(1, 11, 1), 1),
SensorEvent('living-room', make_ts(1, 11, 2), 2),
SensorEvent('living-room', make_ts(2, 11, 1), 3),
SensorEvent('dining-room', make_ts(2, 11, 2), 4)]
def make_rule(sensor_id):
return (lambda evt: evt.sensor_id==sensor_id, sensor_id)
dispatch_rules = [make_rule(s) for s in sensor_ids]
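# Shape note (illustration only): each entry of dispatch_rules above is a
# (predicate, sub_topic) pair, so for example:
#   pred, topic = dispatch_rules[0]
#   pred(SensorEvent('dining-room', 0, 1))  # -> True, routed to 'dining-room'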
class TestRollingCsvWriter(unittest.TestCase):
def _cleanup(self):
for f in FILES2:
if os.path.exists(f):
os.remove(f)
def setUp(self):
self._cleanup()
def tearDown(self):
self._cleanup()
def test_rollover(self):
def generator():
for e in EVENTS:
yield e
sensor = IterableAsPublisher(generator(), name='sensor')
sensor.rolling_csv_writer('.', 'dining-room')
vs = SensorEventValidationSubscriber(EVENTS, self)
sensor.subscribe(vs)
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_recurring(sensor)
scheduler.run_forever()
for f in FILES:
self.assertTrue(os.path.exists(f), 'did not find file %s' % f)
print("found log file %s" % f)
def test_dispatch(self):
"""Test a scenario where we dispatch to one of several writers
depending on the sensor id.
"""
def generator():
for e in EVENTS2:
yield e
sensor = IterableAsPublisher(generator(), name='sensor')
dispatcher = sensor.dispatch(dispatch_rules)
for s in sensor_ids:
dispatcher.rolling_csv_writer('.', s, sub_topic=s)
dispatcher.subscribe(lambda x: self.assertTrue(False, "bad dispatch of %s" % x))
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_recurring(sensor)
scheduler.run_forever()
for f in FILES2:
self.assertTrue(os.path.exists(f), 'did not find file %s' % f)
cnt = 0
with open(f, 'r') as fobj:
for line in fobj:
                    cnt += 1
self.assertEqual(2, cnt, "File %s did not have 2 lines" % f)
print("found log file %s" % f)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8c9c6f86415414eac65099b6ad036d598482a6ef | cc88beafd7a59a832fecff45f436490f805ba000 | /demos/json_schema.py | 72f3cf8eec40655d6dac240792698b62d8a3ff2c | [
"BSD-3-Clause"
] | permissive | RobSpectre/structures | 6ead59bf37ef02e3c3d2181dc941a2e60f98becb | 5345fb63658eecdc59e08882372294f13b0df889 | refs/heads/master | 2020-12-25T04:29:11.389945 | 2012-08-25T17:45:15 | 2012-08-25T17:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #!/usr/bin/env python
"""{'type': 'object', 'properties': {'personal_thoughts': {'type': 'string', 'id': 'personal_thoughts', 'maxLength': 255}, 'title': {'type': 'string', 'id': 'title', 'maxLength': 40}, 'id': {'type': 'string'}, 'year': {'minimum': 1950, 'type': 'integer', 'id': 'year', 'maximum': 2011}}}
"""
import datetime
from structures.models import Model
from structures.types import StringType, IntType
###
### The base class
###
class Movie(Model):
"""Simple model that has one StringType member
"""
title = StringType(max_length=40)
year = IntType(min_value=1950, max_value=datetime.datetime.now().year)
personal_thoughts = StringType(max_length=255)
m = Movie(title='Some Movie',
year=2011,
personal_thoughts='It was pretty good')
print m.for_jsonschema()
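# For reference (matching the expected output in the module docstring above):
# StringType(max_length=40) is emitted as {'type': 'string', 'maxLength': 40},
# and IntType(min_value=1950, max_value=<current year>) as
# {'type': 'integer', 'minimum': 1950, 'maximum': <current year>}.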
| [
"[email protected]"
] | |
449c3c5e309070e99dc56af5ec86f50dc0f73458 | 5292b03998384c0d2bb5858058892d7e45c5365b | /Hack.lu/2020/right-spot/run.py | 8c7042ab7fb76f17d7a1c5a6d887097b60883e2d | [
"MIT"
] | permissive | TheusZer0/ctf-archives | 430ef80d367b44fd81449bcb108e367842cb8e39 | 033ccf8dab0abdbdbbaa4f0092ab589288ddb4bd | refs/heads/main | 2023-09-04T17:56:24.416820 | 2021-11-21T06:51:27 | 2021-11-21T06:51:27 | 430,603,430 | 1 | 0 | MIT | 2021-11-22T07:24:08 | 2021-11-22T07:24:07 | null | UTF-8 | Python | false | false | 2,998 | py | #!/usr/bin/env python3
import zlib
import sys
import os
import subprocess
import io
import bz2
import sys
from flag import flag
COMPRESSED_LIMIT = 2**20 # 1 MB compressed
DECOMPRESSED_LIMIT = 30*2**20 # 30 MB uncompressed
EXPECTED_STRING = b"pwned!\n"
NUM_TESTS = 4
def compress(data):
if len(data) > DECOMPRESSED_LIMIT:
print('ERROR: File size limit exceeded!')
exit(0)
return bz2.compress(data, compresslevel=9)
def decompress(data):
bz2d = bz2.BZ2Decompressor()
output = bz2d.decompress(data, max_length=DECOMPRESSED_LIMIT)
    if bz2d.needs_input:
print('ERROR: File size limit exceeded!')
exit(0)
return output
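# Note on the guard above (per the bz2 module documentation): max_length caps
# how much output a single decompress() call may produce, and needs_input is
# True only once every compressed byte has been consumed while the stream still
# expects more input, so truncated or unfinished streams are rejected here.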
print(f"Welcome! Please send bz2 compressed binary data. How many bytes will you send (MAX: {COMPRESSED_LIMIT})?", flush=True)
try:
num_bytes = int(sys.stdin.readline())
except ValueError:
print("A valid number, please")
exit(0)
if not (0 < num_bytes <= COMPRESSED_LIMIT):
print("Bad number of bytes. Bye!")
exit(0)
print("What is your calculated CRC of the compressed data (hex)?", flush=True)
try:
crc = int(sys.stdin.readline(), 16)
except ValueError:
print("A valid hex crc, please")
exit(0)
print(f"Okay got CRC: {crc:x}, please start sending data", flush=True)
compressed_payload = sys.stdin.buffer.read(num_bytes)
while len(compressed_payload) < num_bytes:
    compressed_payload += sys.stdin.buffer.read(num_bytes - len(compressed_payload))
print(f"Read {len(compressed_payload)} bytes")
calc_crc = zlib.crc32(compressed_payload)
if crc == calc_crc:
print("[+] CRC Checks out, all good.", flush=True)
else:
print(f"CRC mismatch. Calculated CRC: {calc_crc:x}, expected: {crc:x}")
exit(0)
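# Client-side sketch (illustrative only; `conn` and `elf_bytes` are assumed
# names, not part of this service): a solver sends the byte count, the CRC32
# in hex, then the raw bz2 stream:
#   payload = bz2.compress(elf_bytes)
#   conn.sendline(str(len(payload)))
#   conn.sendline(f"{zlib.crc32(payload):x}")
#   conn.send(payload)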
payload = decompress(compressed_payload)
if len(payload) > DECOMPRESSED_LIMIT:
print(f"Payload too long. Got: {len(payload)} bytes. Limit: {DECOMPRESSED_LIMIT}")
exit(0)
print("[+] Decompressed payload", flush=True)
for seed in range(1 << 5):
print(f"Trying seed: 0x{seed:x}", flush=True)
for i in range(1, NUM_TESTS + 1):
print(f"Try #{i}", flush=True)
try:
p = subprocess.Popen(["./right_spot", str(seed)], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout_output, stderr_output = p.communicate(input=payload, timeout=5)
if stdout_output != EXPECTED_STRING:
print("[-] Mh, not the correct output.")
print(f"Output was: {stdout_output}")
exit(0)
if p.returncode != 0:
print(f"[-] Did not return success status code. Status was: {p.returncode}")
exit(0)
except subprocess.TimeoutExpired as e:
print("[-] Process timed out")
p.kill()
exit(0)
except Exception as e:
print("Something unforeseen went wrong...")
print(e)
p.kill()
exit(0)
print(f"Congrats, here is your flag: {flag}", flush=True)
| [
"[email protected]"
] | |
4ffb855b13fd38f3ff0bf76db89c7a878afc1c77 | e210c28eeed9d38eb78c14b3a6388eca1e0e85d8 | /examples/advanced/sklearn-svm/jobs/sklearn_svm_base/app/custom/svm_learner.py | 070ceb832d5e6448975001a4a6fd155dcae0fea3 | [
"Apache-2.0"
] | permissive | NVIDIA/NVFlare | 5a2d2e4c85a3fd0948e25f1ba510449727529a15 | 1433290c203bd23f34c29e11795ce592bc067888 | refs/heads/main | 2023-08-03T09:21:32.779763 | 2023-07-05T21:17:16 | 2023-07-05T21:17:16 | 388,876,833 | 442 | 140 | Apache-2.0 | 2023-09-14T19:12:35 | 2021-07-23T17:26:12 | Python | UTF-8 | Python | false | false | 3,884 | py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from sklearn.metrics import roc_auc_score
from sklearn.svm import SVC
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_opt.sklearn.data_loader import load_data_for_range
class SVMLearner(Learner):
def __init__(
self,
data_path: str,
train_start: int,
train_end: int,
valid_start: int,
valid_end: int,
):
super().__init__()
self.data_path = data_path
self.train_start = train_start
self.train_end = train_end
self.valid_start = valid_start
self.valid_end = valid_end
self.train_data = None
self.valid_data = None
self.n_samples = None
self.svm = None
self.kernel = None
self.params = {}
def load_data(self) -> dict:
train_data = load_data_for_range(self.data_path, self.train_start, self.train_end)
valid_data = load_data_for_range(self.data_path, self.valid_start, self.valid_end)
return {"train": train_data, "valid": valid_data}
def initialize(self, fl_ctx: FLContext):
data = self.load_data()
self.train_data = data["train"]
self.valid_data = data["valid"]
# train data size, to be used for setting
# NUM_STEPS_CURRENT_ROUND for potential use in aggregation
self.n_samples = data["train"][-1]
# model will be created after receiving global parameter of kernel
def train(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
if curr_round == 0:
# only perform training on the first round
(x_train, y_train, train_size) = self.train_data
self.kernel = global_param["kernel"]
self.svm = SVC(kernel=self.kernel)
# train model
self.svm.fit(x_train, y_train)
# get support vectors
index = self.svm.support_
local_support_x = x_train[index]
local_support_y = y_train[index]
self.params = {"support_x": local_support_x, "support_y": local_support_y}
elif curr_round > 1:
self.system_panic("Federated SVM only performs training for one round, system exiting.", fl_ctx)
return self.params, self.svm
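    # Protocol note (as implied by the single-round check above): every client
    # fits one local SVC and ships only its support vectors; the server side is
    # expected to pool them into the global "support_x"/"support_y" arrays that
    # validate() consumes below.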
def validate(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# local validation with global support vectors
# fit a standalone SVM with the global support vectors
svm_global = SVC(kernel=self.kernel)
support_x = global_param["support_x"]
support_y = global_param["support_y"]
svm_global.fit(support_x, support_y)
# validate global model
(x_valid, y_valid, valid_size) = self.valid_data
y_pred = svm_global.predict(x_valid)
auc = roc_auc_score(y_valid, y_pred)
self.log_info(fl_ctx, f"AUC {auc:.4f}")
metrics = {"AUC": auc}
return metrics, svm_global
def finalize(self, fl_ctx: FLContext) -> None:
# freeing resources in finalize
del self.train_data
del self.valid_data
self.log_info(fl_ctx, "Freed training resources")
| [
"[email protected]"
] | |
5097b846d08047afa9caae82e49275ea9f3c46fa | af8f42e890126aa9af0535991e7c7109db1cedf7 | /hw1/reports/sec2plot.py | b3a31adb9c253b6768e3e192addbcc8116a3fcb0 | [
"MIT"
] | permissive | mwhittaker/homework | 1c482a346a85c0eb9364185cb90ab5efdc67d632 | 2faa90662ea0b256625bd07d0d26de39b4e9a455 | refs/heads/master | 2021-01-22T07:39:51.446384 | 2017-10-18T17:31:05 | 2017-10-18T17:31:05 | 102,309,475 | 4 | 0 | null | 2017-09-04T02:16:25 | 2017-09-04T02:16:25 | null | UTF-8 | Python | false | false | 378 | py | import matplotlib.pyplot as plt
import numpy as np
def main():
X = np.genfromtxt("sec2.txt", delimiter=" ")
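    # Assumed input layout (illustrative): sec2.txt holds one "step loss" pair
    # per line, e.g. "100 0.52", so column 0 is the training iteration and
    # column 1 the loss plotted below.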
steps = X[:,0]
loss = X[:,1]
plt.figure()
plt.semilogy(steps, loss)
plt.grid()
plt.xlabel("Training iteration")
plt.ylabel("Training loss (average mean squared error)")
plt.savefig("sec2.pdf")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8ed04321cc923a0b0cf6b320d0b25f6205625691 | b50c44ad44f95035332f371517808406e4a756d0 | /cbvSerializers/cbvApp/migrations/0001_initial.py | 3af20fe7c5f546e1c79919b7d44819cc546f7478 | [] | no_license | anandkumar-design/api1 | d970e336f15b46dceb07ef480aa57fd544a3bd93 | ae767463828138b97f4cf5ef6f7ac2ae4ac33afa | refs/heads/main | 2023-04-25T00:18:13.406364 | 2021-05-13T12:43:35 | 2021-05-13T12:43:35 | 367,045,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # Generated by Django 3.2 on 2021-05-04 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
('score', models.DecimalField(decimal_places=10, max_digits=19)),
],
),
]
| [
"[email protected]"
] | |
22e6134ec7f1f2c305fd830471c62ba2d52f78ba | e3a97b316fdf07b170341da206163a865f9e812c | /python/kwiver/vital/tests/test_category_hierarchy.py | c9497979ff437d836cb13a371ab1157285bb6372 | [
"BSD-3-Clause"
] | permissive | Kitware/kwiver | 09133ede9d05c33212839cc29d396aa8ca21baaf | a422409b83f78f31cda486e448e8009513e75427 | refs/heads/master | 2023-08-28T10:41:58.077148 | 2023-07-28T21:18:52 | 2023-07-28T21:18:52 | 23,229,909 | 191 | 92 | NOASSERTION | 2023-06-26T17:18:20 | 2014-08-22T15:22:20 | C++ | UTF-8 | Python | false | false | 16,958 | py | """
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::category_hierarchy
"""
from kwiver.vital.types import CategoryHierarchy
import nose.tools as nt
import os
import tempfile
import unittest
class TestVitalCategoryHierarchy(unittest.TestCase):
def setUp(self):
"""
First create the following hierarchy using lists
class0
/ \
/ \
class1_0 class1_1
/
/
class2_0
where class0 has id 0,
class1_0 has id 1,
class1_1 has id 2, and
class2_0 has id 3
"""
self.class_names = ["class0", "class1_0", "class1_1", "class2_0"]
self.parent_names = ["", "class0", "class0", "class1_0"]
self.ids = [0, 1, 2, 3]
# Now write to a file to create a similar hierarchy
# Unfortunately, in order for this to work on Windows, we can't
# utilize tempfile's automatic cleanup, as the C++ process won't be
# able to read the file if it's still open in Python
# So create a file and manually delete in tearDown()
self.fp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
# This hierarchy is the same as the one constructed using lists,
# Except class2_0 also has class1_1 as a parent. Each class
# also has 2 synonyms of the form:
# {classname}_syn{syn_num}, where syn_num is 0 or 1
self.fp.writelines(
[
"class0 class0_syn0 class0_syn1",
"\nclass1_0 :parent=class0 class1_0_syn0 class1_0_syn1",
"\nclass1_1 class1_1_syn0 class1_1_syn1 :parent=class0",
"\nclass2_0 class2_0_syn0 :parent=class1_0 :parent=class1_1 class2_0_syn1",
"\n#class5",
]
)
self.fp.flush()
# Close so C++ can read
self.fp.close()
# Manually delete the file
def tearDown(self):
os.remove(self.fp.name)
nt.assert_false(os.path.exists(self.fp.name))
def test_default_constructor(self):
CategoryHierarchy()
def test_construct_from_file(self):
CategoryHierarchy(self.fp.name)
def test_constructor_from_file_no_exist(self):
expected_err_msg = "Unable to open nonexistant_file.txt"
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
CategoryHierarchy("nonexistant_file.txt")
def test_construct_from_lists(self):
# Should be able to call with just class_names
CategoryHierarchy(self.class_names)
# class_names and parent_names
CategoryHierarchy(self.class_names, self.parent_names)
# class_names and ids
CategoryHierarchy(self.class_names, ids=self.ids)
# and all 3
CategoryHierarchy(self.class_names, self.parent_names, self.ids)
def _create_hierarchies(self):
empty = CategoryHierarchy()
from_file = CategoryHierarchy(self.fp.name)
from_lists = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
return (empty, from_file, from_lists)
def test_constructor_throws_exceptions(self):
# Passing class_names and parent_names of different sizes
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names, self.parent_names[:-1])
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names[:-1], self.parent_names)
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy([], self.parent_names)
# Passing class_names and ids of different sizes
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names, ids=self.ids[:-1])
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy(self.class_names[:-1], ids=self.ids)
with nt.assert_raises_regexp(ValueError, "Parameter vector sizes differ."):
CategoryHierarchy([], ids=self.ids)
# Passing empty class_names also throws exception
with nt.assert_raises_regexp(ValueError, "Parameter vector are empty."):
CategoryHierarchy([])
def test_initial_classes(self):
empty, from_file, from_lists = self._create_hierarchies()
# First check that each hierarchy does/does not
# have the expected class names
for name, id_ in zip(self.class_names, self.ids):
# empty
nt.assert_false(
empty.has_class_name(name),
"Empty hierarchy had classname {}".format(name),
)
# from_file
nt.ok_(
from_file.has_class_name(name),
"heirarchy constructed from file does not have {}".format(name),
)
nt.assert_equals(from_file.get_class_id(name), id_)
nt.assert_equals(from_file.get_class_name(name), name)
# from_lists
nt.ok_(
from_lists.has_class_name(name),
"heirarchy constructed from lists does not have {}".format(name),
)
nt.assert_equals(from_lists.get_class_id(name), id_)
nt.assert_equals(from_lists.get_class_name(name), name)
# Tests for empty
nt.assert_equals(empty.all_class_names(), [])
nt.assert_equals(empty.size(), 0)
# Tests for from_file
nt.assert_equals(from_file.all_class_names(), self.class_names)
# Each class has 2 synonyms, so size is 3 * # classes
nt.assert_equals(from_file.size(), 3 * len(self.class_names))
# Make sure class5, which was commented out, is not present
nt.assert_false(from_file.has_class_name("class5"))
# Tests for from_lists
nt.assert_equals(from_lists.all_class_names(), self.class_names)
nt.assert_equals(from_lists.size(), len(self.class_names))
# Only hierarchies constructed from files can be constructed with synonyms
def test_initial_synonyms(self):
ch = CategoryHierarchy(self.fp.name)
for cname in ch.all_class_names():
syn0_name = cname + "_syn0"
syn1_name = cname + "_syn1"
nt.ok_(ch.has_class_name(syn0_name))
nt.ok_(ch.has_class_name(syn1_name))
nt.assert_equals(ch.get_class_name(syn0_name), cname)
nt.assert_equals(ch.get_class_name(syn1_name), cname)
def test_initial_relationships(self):
empty, from_file, from_lists = self._create_hierarchies()
# Tests for empty
nt.assert_equals(empty.child_class_names(), [])
# Tests for from_file
nt.assert_equals(from_file.child_class_names(), ["class2_0"])
nt.assert_equals(from_file.get_class_parents("class0"), [])
nt.assert_equals(from_file.get_class_parents("class2_0"), ["class1_0", "class1_1"])
nt.assert_equals(from_file.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(from_file.get_class_parents("class1_1"), ["class0"])
# Tests for from_lists
nt.assert_equals(from_lists.child_class_names(), ["class1_1", "class2_0"])
nt.assert_equals(from_lists.get_class_parents("class0"), [])
nt.assert_equals(from_lists.get_class_parents("class2_0"), ["class1_0"])
nt.assert_equals(from_lists.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(from_lists.get_class_parents("class1_1"), ["class0"])
def test_add_class(self):
ch = CategoryHierarchy()
# Check default for parent_name and id params
ch.add_class("class0")
nt.assert_equals(ch.get_class_id("class0"), -1)
# Now for parent_name
ch.add_class("class1", id=0)
nt.assert_equals(ch.get_class_id("class1"), 0)
# Now for id
ch.add_class("class2", parent_name="class1")
nt.assert_equals(ch.get_class_id("class2"), -1)
# Check has_class_name returns correct result
nt.ok_(ch.has_class_name("class0"))
nt.ok_(ch.has_class_name("class1"))
nt.ok_(ch.has_class_name("class2"))
# Check class list
nt.assert_equals(ch.all_class_names(), ["class1", "class0", "class2"])
nt.assert_equals(ch.size(), 3)
# Check relationships are correct
# TODO: Should this only be class2 and class0? Current implementation
# of add_class only adds class1 to class2's parents. Class2 isn't added
# to Class1's list of children, which makes it a child class.
nt.assert_equals(ch.child_class_names(), ["class1", "class0", "class2"])
nt.assert_equals(ch.get_class_parents("class0"), [])
nt.assert_equals(ch.get_class_parents("class1"), [])
nt.assert_equals(ch.get_class_parents("class2"), ["class1"])
def test_add_class_already_exists(self):
ch = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
with nt.assert_raises_regexp(RuntimeError, "Category already exists"):
ch.add_class(self.class_names[0])
ch.add_class("new_class")
with nt.assert_raises_regexp(RuntimeError, "Category already exists"):
ch.add_class("new_class")
def test_add_relationship(self):
ch = CategoryHierarchy()
ch.add_class("class0")
ch.add_class("class1_0")
ch.add_class("class1_1")
ch.add_class("class2_0")
# Same as the file
ch.add_relationship("class1_0", "class0")
ch.add_relationship("class1_1", "class0")
ch.add_relationship("class2_0", "class1_0")
ch.add_relationship("class2_0", "class1_1")
nt.assert_equals(ch.child_class_names(), ["class2_0"])
nt.assert_equals(ch.get_class_parents("class2_0"), ["class1_0", "class1_1"])
nt.assert_equals(ch.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(ch.get_class_parents("class1_1"), ["class0"])
def test_add_synonym(self):
ch = CategoryHierarchy(self.class_names, self.parent_names, self.ids)
ch.add_synonym("class2_0", "class2_0_syn0")
ch.add_synonym("class2_0", "class2_0_syn1")
ch.add_synonym("class1_0", "class1_0_syn0")
ch.add_synonym("class1_0", "class1_0_syn1")
# First check the old classes exist
nt.assert_equals(ch.all_class_names(), self.class_names)
# Check the size
nt.assert_equals(ch.size(), 8)
# Now check synonyms exist
nt.ok_(ch.has_class_name("class2_0_syn0"))
nt.ok_(ch.has_class_name("class2_0_syn1"))
nt.ok_(ch.has_class_name("class1_0_syn0"))
nt.ok_(ch.has_class_name("class1_0_syn1"))
# Check the name of the actual category
nt.assert_equals(ch.get_class_name("class2_0_syn0"), "class2_0")
nt.assert_equals(ch.get_class_name("class2_0_syn1"), "class2_0")
nt.assert_equals(ch.get_class_name("class1_0_syn0"), "class1_0")
nt.assert_equals(ch.get_class_name("class1_0_syn1"), "class1_0")
# Now check that the relationships are still intact
nt.assert_equals(ch.get_class_parents("class2_0_syn0"), ["class1_0"])
nt.assert_equals(ch.get_class_parents("class2_0_syn1"), ["class1_0"])
nt.assert_equals(ch.get_class_parents("class1_0_syn0"), ["class0"])
nt.assert_equals(ch.get_class_parents("class1_0_syn1"), ["class0"])
def test_add_synonym_already_exists(self):
ch = CategoryHierarchy()
ch.add_class("class0")
ch.add_synonym("class0", "class0_syn0")
ch.add_synonym("class0", "class0_syn1")
expected_err_msg = "Synonym name already exists in hierarchy"
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_synonym("class0", "class0_syn0")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_synonym("class0", "class0_syn1")
def test_load_from_file(self):
ch = CategoryHierarchy()
ch.add_class("class-1")
ch.add_synonym("class-1", "class-1_syn0")
ch.add_synonym("class-1", "class-1_syn1")
ch.load_from_file(self.fp.name)
nt.assert_equals(ch.all_class_names(), self.class_names + ["class-1"])
# Check synonyms
for cname in self.class_names + ["class-1"]:
nt.ok_(ch.has_class_name(cname + "_syn0"))
nt.ok_(ch.has_class_name(cname + "_syn1"))
nt.assert_equals(ch.get_class_name(cname + "_syn0"), cname)
nt.assert_equals(ch.get_class_name(cname + "_syn1"), cname)
# Now check that the relationships are still intact
nt.assert_equals(ch.child_class_names(), ["class2_0", "class-1"])
nt.assert_equals(ch.get_class_parents("class0"), [])
nt.assert_equals(ch.get_class_parents("class2_0"), ["class1_0", "class1_1"])
nt.assert_equals(ch.get_class_parents("class1_0"), ["class0"])
nt.assert_equals(ch.get_class_parents("class1_1"), ["class0"])
def test_load_from_file_not_exist(self):
ch = CategoryHierarchy()
expected_err_msg = "Unable to open nonexistant_file.txt"
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.load_from_file("nonexistant_file.txt")
# Some functions throw exceptions if the category
# can't be found. Those will be tested here
def test_category_not_exist(self):
chs = list(self._create_hierarchies())
expected_err_msg = "Class node absent_class does not exist"
for ch in chs:
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_class("new_class1", "absent_class")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.get_class_name("absent_class")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.get_class_id("absent_class")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.get_class_parents("absent_class")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_relationship("absent_class", "another_absent_class")
ch.add_class("new_class2")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_relationship("new_class2", "absent_class")
with nt.assert_raises_regexp(RuntimeError, expected_err_msg):
ch.add_synonym("absent_class", "synonym")
# Extra test for the sort function used
# in a few member functions. all_class_names()
# essentially returns the result
def test_sort(self):
ch = CategoryHierarchy()
# Adding them in this way forces
# every type of comparison to be made
ch.add_class("a", id=1)
ch.add_class("c")
ch.add_class("b")
ch.add_class("d", id=0)
# names with ids are first sorted (in alphabetical order), followed by
# names without ids in alphabetical order
nt.assert_equals(ch.all_class_names(), ["d", "a", "b", "c"])
nt.assert_equals(ch.child_class_names(), ["d", "a", "b", "c"])
| [
"[email protected]"
] | |
68ea1dea2939203e6f537230def02ae234372113 | e13c98f36c362717fdf22468b300321802346ef5 | /documents/migrations/0002_auto_20161206_1643.py | a5421c43dfb9d72c97c9d451bbe268874e6e6229 | [] | no_license | alexmon1989/libraries_portal | 2415cc49de33459266a9f18ed8bb34ac99d3eb7c | 277081e09f6347c175775337bffba074a35f3b92 | refs/heads/master | 2021-01-23T07:25:53.884795 | 2018-12-25T14:29:29 | 2018-12-25T14:29:29 | 80,501,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 14:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='anotherperson',
options={'verbose_name': 'ะััะณะฐั ะฟะตััะพะฝั', 'verbose_name_plural': 'ะััะณะธะต ะฟะตััะพะฝั'},
),
migrations.AlterModelOptions(
name='document',
options={'verbose_name': 'ะะพะบัะผะตะฝั', 'verbose_name_plural': 'ะะพะบัะผะตะฝัั'},
),
migrations.AlterModelOptions(
name='documenttype',
options={'verbose_name': 'ะขะธะฟ ะดะพะบัะผะตะฝัะฐ', 'verbose_name_plural': 'ะขะธะฟั ะดะพะบัะผะตะฝัะพะฒ'},
),
migrations.AddField(
model_name='document',
name='catalog_number',
            field=models.CharField(default=1, max_length=255, verbose_name='Storage code (No. in the catalog)'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
0135a94e64c60142d8c29bfdaba4788908690526 | 376150fe6b4dd5d8c3caa65714aa47bc35e31784 | /nintendo/games.py | dd8c5a5db2e8ac06d7e378a4ab5f9226ca8181f6 | [
"MIT"
] | permissive | tenda-gumi/NintendoClients | 977d5de576a0216a2e6c894bfa5de1658f8ef5de | 67f2600f68e441980187932e8521d6dcc69dcc24 | refs/heads/master | 2020-06-17T14:33:52.314419 | 2019-07-10T04:19:38 | 2019-07-10T04:19:38 | 195,951,685 | 0 | 0 | null | 2019-07-09T07:03:23 | 2019-07-09T07:03:23 | null | UTF-8 | Python | false | false | 1,320 | py |
#===== Wii U Games =====#
class Friends:
TITLE_ID_EUR = 0x10001C00
TITLE_ID_USA = 0x10001C00
TITLE_ID_JAP = 0x10001C00
LATEST_VERSION = 0
GAME_SERVER_ID = 0x3200
ACCESS_KEY = "ridfebb9"
NEX_VERSION = 20000
class DKCTF:
TITLE_ID_EUR = 0x0005000010138300
TITLE_ID_USA = 0x0005000010137F00
TITLE_ID_JAP = 0x0005000010144800
LATEST_VERSION = 17
GAME_SERVER_ID = 0x10144800
ACCESS_KEY = "7fcf384a"
NEX_VERSION = 30400 #3.4.0
SERVER_VERSION = 3
class MK8:
TITLE_ID_EUR = 0x000500001010ED00
TITLE_ID_USA = 0x000500001010EC00
TITLE_ID_JAP = 0x000500001010EB00
LATEST_VERSION = 64
GAME_SERVER_ID = 0x1010EB00
ACCESS_KEY = "25dbf96a"
NEX_VERSION = 30504 #3.5.4 (AMK patch)
SERVER_VERSION = 2002
class SMM:
TITLE_ID_EUR = 0x000500001018DD00
TITLE_ID_USA = 0x000500001018DC00
TITLE_ID_JAP = 0x000500001018DB00
LATEST_VERSION = 272
GAME_SERVER_ID = 0x1018DB00
ACCESS_KEY = "9f2b4678"
NEX_VERSION = 30810 #3.8.10 (AMA patch)
SERVER_VERSION = 3017
#===== Switch Games =====#
class MK8Deluxe:
GAME_SERVER_ID = 0x2B309E01
ACCESS_KEY = "09c1c475"
NEX_VERSION = 40007 #4.0.7 (apptrbs)
class SMO:
GAME_SERVER_ID = 0x255BA201
ACCESS_KEY = "afef0ecf"
NEX_VERSION = 40302 #4.3.2
class SMM2:
GAME_SERVER_ID = 0x22306D00
ACCESS_KEY = "fdf6617f"
NEX_VERSION = 40601 #4.6.15 (appslop)
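# Version-encoding note (inferred from the inline comments above, e.g.
# 30504 <-> 3.5.4, so treat it as an assumption rather than documented API):
# NEX_VERSION packs major.minor.patch as major*10000 + minor*100 + patch.
def nex_version_string(version):
    # e.g. nex_version_string(30504) -> "3.5.4"
    return "%d.%d.%d" % (version // 10000, (version // 100) % 100, version % 100)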
| [
"[email protected]"
] | |
4b27e8803d48b26d90c568aa778d7eec4c44dc85 | 2210a763aa4679283b8d3b59e862caf691d4d5af | /projects/migrations/0003_userprofile.py | b8d9373aa4b9f3d471c16018d1fdfdf8b3e7faea | [
"BSD-2-Clause"
] | permissive | dymaxionlabs/analytics-backend | 21c7dd4579188b20214c7c8ac92db26ca04348f8 | fb801b184e4e510d54e8addb283fd202c9dfe7b1 | refs/heads/master | 2022-05-10T23:30:35.958044 | 2022-04-24T13:58:59 | 2022-04-24T13:58:59 | 192,096,995 | 0 | 0 | BSD-3-Clause | 2022-04-24T13:59:11 | 2019-06-15T15:55:28 | Python | UTF-8 | Python | false | false | 863 | py | # Generated by Django 2.1.5 on 2019-01-21 03:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0002_auto_20181130_1704'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(blank=True, max_length=120)),
('phone', models.CharField(blank=True, max_length=40)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
eef8ad3df7f2867949338d395c89c8fdc5d64834 | 1058984cbca36552120092af1f849cea27662c50 | /rebench/tests/interop/time_adapter_test.py | a73f964d66ac61d2533d516993e656b4d65258f5 | [
"MIT"
] | permissive | smarr/ReBench | 21437c7a348a1821f8c5e5016539211376439447 | fd8fa6beeac13c87e848ea76efb1243d1e6ee3ae | refs/heads/master | 2023-08-28T00:38:18.378579 | 2023-08-06T15:11:50 | 2023-08-06T15:11:50 | 1,263,079 | 71 | 19 | MIT | 2023-08-06T15:11:52 | 2011-01-17T10:43:28 | Python | UTF-8 | Python | false | false | 2,355 | py | # Copyright (c) 2016 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unittest import TestCase
from ...interop.adapter import OutputNotParseable
from ...interop.time_adapter import TimeAdapter, TimeManualAdapter
class _TestRunId(object):
def cmdline_for_next_invocation(self):
return "FOO"
class TimeAdapterTest(TestCase):
def test_acquire_command(self):
adapter = TimeAdapter(False, None)
cmd = adapter.acquire_command(_TestRunId())
self.assertRegex(cmd, r"^/.*/bin/g?time")
def test_parse_data(self):
data = """real 11.00
user 5.00
sys 1.00"""
adapter = TimeAdapter(False, None)
TimeAdapter._use_formatted_time = False
data = adapter.parse_data(data, None, 1)
self.assertEqual(1, len(data))
measurements = data[0].get_measurements()
self.assertEqual(3, len(measurements))
self.assertEqual(11000, data[0].get_total_value())
def test_parse_no_data(self):
adapter = TimeAdapter(False, None)
self.assertRaises(OutputNotParseable, adapter.parse_data, "", None, 1)
def test_manual_adapter(self):
adapter = TimeManualAdapter(False, None)
cmd = adapter.acquire_command(_TestRunId())
self.assertEqual("FOO", cmd)
| [
"[email protected]"
] | |
fb3ae55ef32a7d7e54a07342319d8f0569c814a0 | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/education/azext_education/vendored_sdks/education/operations/_education_class_assignment_submission_operations.py | 40c6e974d3dfc0da75b45fd0d3fe60b3bdfb72ea | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85,715 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EducationClassAssignmentSubmissionOperations(object):
"""EducationClassAssignmentSubmissionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~education.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def return_method(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action return.
Invoke action return.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.return_method.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
return_method.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.return'} # type: ignore
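    # Illustrative call pattern (the operation-group attribute name on the
    # generated client is assumed for this example only):
    #     client.education_class_assignment_submission.return_method(
    #         "<class-id>", "<assignment-id>", "<submission-id>")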
def submit(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action submit.
Invoke action submit.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.submit.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.submit'} # type: ignore
def unsubmit(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmission"
"""Invoke action unsubmit.
Invoke action unsubmit.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmission, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmission"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.unsubmit.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unsubmit.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/microsoft.graph.unsubmit'} # type: ignore
def list_outcome(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum139"]]]
select=None, # type: Optional[List[Union[str, "models.Enum140"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfEducationOutcome"]
"""Get outcomes from education.
Get outcomes from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~education.models.Enum139]
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum140]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfEducationOutcome or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationOutcome]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfEducationOutcome"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_outcome.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfEducationOutcome', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes'} # type: ignore
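    # Usage sketch (client attribute name assumed): the call returns an
    # ItemPaged iterable that fetches follow-up pages lazily, for example:
    #     for outcome in client.education_class_assignment_submission.list_outcome(
    #             cid, aid, sid):
    #         print(outcome.id)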
def create_outcome(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
id=None, # type: Optional[str]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationOutcome"
"""Create new navigation property to outcomes for education.
Create new navigation property to outcomes for education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param id: Read-only.
:type id: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param display_name: The identity's display name. Note that this may not always be available or
up to date. For example, if a user changes their display name, the API may show the new value
in a future response, but the items associated with the user won't show up as having changed
when using delta.
:type display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationOutcome, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationOutcome
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationOutcome"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationOutcome(id=id, last_modified_date_time=last_modified_date_time, id_last_modified_by_user_id=microsoft_graph_identity_id, display_name_last_modified_by_user_display_name=display_name, id_last_modified_by_device_id=id1, display_name_last_modified_by_device_display_name=microsoft_graph_identity_display_name, id_last_modified_by_application_id=id2, display_name_last_modified_by_application_display_name=display_name1)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_outcome.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationOutcome')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationOutcome', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes'} # type: ignore
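    # Illustrative usage (not generated code): how a caller might invoke this
    # operation. `graph_client` and the `education_classes` attribute are
    # assumptions for this sketch; the real group name depends on the client.
    #
    #   outcome = graph_client.education_classes.create_outcome(
    #       education_class_id="<class-id>",
    #       education_assignment_id="<assignment-id>",
    #       education_submission_id="<submission-id>",
    #   )
    #   print(outcome.id)  # deserialized MicrosoftGraphEducationOutcome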
def get_outcome(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_outcome_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum141"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationOutcome"
"""Get outcomes from education.
Get outcomes from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_outcome_id: key: educationOutcome-id of educationOutcome.
:type education_outcome_id: str
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum141]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationOutcome, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationOutcome
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationOutcome"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_outcome.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationOutcome-id': self._serialize.url("education_outcome_id", education_outcome_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationOutcome', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes/{educationOutcome-id}'} # type: ignore
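    # Illustrative usage (assumption: `ops` is an instance of this operations
    # class bound to a client). `select`/`expand` are passed straight through
    # as OData $select/$expand query options:
    #
    #   outcome = ops.get_outcome(
    #       "<class-id>", "<assignment-id>", "<submission-id>", "<outcome-id>",
    #       select=["id", "lastModifiedDateTime"],
    #   )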
def update_outcome(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_outcome_id, # type: str
id=None, # type: Optional[str]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property outcomes in education.
Update the navigation property outcomes in education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_outcome_id: key: educationOutcome-id of educationOutcome.
:type education_outcome_id: str
:param id: Read-only.
:type id: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param display_name: The identity's display name. Note that this may not always be available or
up to date. For example, if a user changes their display name, the API may show the new value
in a future response, but the items associated with the user won't show up as having changed
when using delta.
:type display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationOutcome(id=id, last_modified_date_time=last_modified_date_time, id_last_modified_by_user_id=microsoft_graph_identity_id, display_name_last_modified_by_user_display_name=display_name, id_last_modified_by_device_id=id1, display_name_last_modified_by_device_display_name=microsoft_graph_identity_display_name, id_last_modified_by_application_id=id2, display_name_last_modified_by_application_display_name=display_name1)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_outcome.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationOutcome-id': self._serialize.url("education_outcome_id", education_outcome_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationOutcome')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_outcome.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/outcomes/{educationOutcome-id}'} # type: ignore
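    # Note: a successful PATCH returns HTTP 204 with no body, so this method
    # yields None unless a `cls` callback is supplied. Illustrative call
    # (placeholder ids; `now` would be a datetime.datetime):
    #
    #   ops.update_outcome("<class-id>", "<assignment-id>", "<submission-id>",
    #                      "<outcome-id>", last_modified_date_time=now)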
def list_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum142"]]]
select=None, # type: Optional[List[Union[str, "models.Enum143"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfEducationSubmissionResource"]
"""Get resources from education.
Get resources from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~education.models.Enum142]
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum143]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CollectionOfEducationSubmissionResource or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationSubmissionResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfEducationSubmissionResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfEducationSubmissionResource', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources'} # type: ignore
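    # Illustrative usage: the returned ItemPaged lazily follows
    # `odata.nextLink` via prepare_request/get_next above, so callers can
    # simply iterate (`ops` is an assumed instance of this class):
    #
    #   for res in ops.list_resource("<class-id>", "<assignment-id>",
    #                                "<submission-id>", select=["id"]):
    #       print(res.id)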
def create_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
id=None, # type: Optional[str]
assignment_resource_url=None, # type: Optional[str]
display_name=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name2=None, # type: Optional[str]
id3=None, # type: Optional[str]
display_name3=None, # type: Optional[str]
id4=None, # type: Optional[str]
display_name4=None, # type: Optional[str]
id5=None, # type: Optional[str]
display_name5=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
"""Create new navigation property to resources for education.
Create new navigation property to resources for education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param id: Read-only.
:type id: str
:param assignment_resource_url:
:type assignment_resource_url: str
:param display_name:
:type display_name: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name2: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name2: str
:param id3: Unique identifier for the identity.
:type id3: str
:param display_name3: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name3: str
:param id4: Unique identifier for the identity.
:type id4: str
:param display_name4: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name4: str
:param id5: Unique identifier for the identity.
:type id5: str
:param display_name5: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name5: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationSubmissionResource(id=id, assignment_resource_url=assignment_resource_url, display_name_resource_display_name=display_name, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, id_resource_last_modified_by_user_id=microsoft_graph_identity_id, display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name, id_resource_last_modified_by_device_id=id1, display_name_resource_last_modified_by_device_display_name=display_name1, id_resource_last_modified_by_application_id=id2, display_name_resource_last_modified_by_application_display_name=display_name2, id_resource_created_by_user_id=id3, display_name_resource_created_by_user_display_name=display_name3, id_resource_created_by_device_id=id4, display_name_resource_created_by_device_display_name=display_name4, id_resource_created_by_application_id=id5, display_name_resource_created_by_application_display_name=display_name5)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources'} # type: ignore
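    # Note: the many id*/display_name* parameters are a flattened encoding of
    # the nested createdBy/lastModifiedBy identity sets; they are recombined
    # into a single MicrosoftGraphEducationSubmissionResource body above, so
    # callers usually set only the few identity fields they actually know.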
def get_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_submission_resource_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum144"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
"""Get resources from education.
Get resources from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_submission_resource_id: key: educationSubmissionResource-id of
educationSubmissionResource.
:type education_submission_resource_id: str
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum144]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources/{educationSubmissionResource-id}'} # type: ignore
def update_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_submission_resource_id, # type: str
id=None, # type: Optional[str]
assignment_resource_url=None, # type: Optional[str]
display_name=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name2=None, # type: Optional[str]
id3=None, # type: Optional[str]
display_name3=None, # type: Optional[str]
id4=None, # type: Optional[str]
display_name4=None, # type: Optional[str]
id5=None, # type: Optional[str]
display_name5=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property resources in education.
Update the navigation property resources in education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_submission_resource_id: key: educationSubmissionResource-id of
educationSubmissionResource.
:type education_submission_resource_id: str
:param id: Read-only.
:type id: str
:param assignment_resource_url:
:type assignment_resource_url: str
:param display_name:
:type display_name: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name2: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name2: str
:param id3: Unique identifier for the identity.
:type id3: str
:param display_name3: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name3: str
:param id4: Unique identifier for the identity.
:type id4: str
:param display_name4: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name4: str
:param id5: Unique identifier for the identity.
:type id5: str
:param display_name5: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name5: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationSubmissionResource(id=id, assignment_resource_url=assignment_resource_url, display_name_resource_display_name=display_name, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, id_resource_last_modified_by_user_id=microsoft_graph_identity_id, display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name, id_resource_last_modified_by_device_id=id1, display_name_resource_last_modified_by_device_display_name=display_name1, id_resource_last_modified_by_application_id=id2, display_name_resource_last_modified_by_application_display_name=display_name2, id_resource_created_by_user_id=id3, display_name_resource_created_by_user_display_name=display_name3, id_resource_created_by_device_id=id4, display_name_resource_created_by_device_display_name=display_name4, id_resource_created_by_application_id=id5, display_name_resource_created_by_application_display_name=display_name5)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/resources/{educationSubmissionResource-id}'} # type: ignore
def list_submitted_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum145"]]]
select=None, # type: Optional[List[Union[str, "models.Enum146"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfEducationSubmissionResource0"]
"""Get submittedResources from education.
Get submittedResources from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~education.models.Enum145]
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum146]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CollectionOfEducationSubmissionResource0 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~education.models.CollectionOfEducationSubmissionResource0]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfEducationSubmissionResource0"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_submitted_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfEducationSubmissionResource0', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources'} # type: ignore
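    # Note: OData query options ($top, $skip, $filter, orderby, select, ...)
    # are only attached to the first request; follow-up pages reuse the
    # server-provided `next_link` verbatim (see prepare_request above), which
    # matches Microsoft Graph's server-driven paging contract.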
def create_submitted_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
id=None, # type: Optional[str]
assignment_resource_url=None, # type: Optional[str]
display_name=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name2=None, # type: Optional[str]
id3=None, # type: Optional[str]
display_name3=None, # type: Optional[str]
id4=None, # type: Optional[str]
display_name4=None, # type: Optional[str]
id5=None, # type: Optional[str]
display_name5=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
"""Create new navigation property to submittedResources for education.
Create new navigation property to submittedResources for education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param id: Read-only.
:type id: str
:param assignment_resource_url:
:type assignment_resource_url: str
:param display_name:
:type display_name: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name2: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name2: str
:param id3: Unique identifier for the identity.
:type id3: str
:param display_name3: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name3: str
:param id4: Unique identifier for the identity.
:type id4: str
:param display_name4: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name4: str
:param id5: Unique identifier for the identity.
:type id5: str
:param display_name5: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name5: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationSubmissionResource(id=id, assignment_resource_url=assignment_resource_url, display_name_resource_display_name=display_name, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, id_resource_last_modified_by_user_id=microsoft_graph_identity_id, display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name, id_resource_last_modified_by_device_id=id1, display_name_resource_last_modified_by_device_display_name=display_name1, id_resource_last_modified_by_application_id=id2, display_name_resource_last_modified_by_application_display_name=display_name2, id_resource_created_by_user_id=id3, display_name_resource_created_by_user_display_name=display_name3, id_resource_created_by_device_id=id4, display_name_resource_created_by_device_display_name=display_name4, id_resource_created_by_application_id=id5, display_name_resource_created_by_application_display_name=display_name5)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_submitted_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources'} # type: ignore
def get_submitted_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_submission_resource_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum147"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphEducationSubmissionResource"
"""Get submittedResources from education.
Get submittedResources from education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_submission_resource_id: key: educationSubmissionResource-id of
educationSubmissionResource.
:type education_submission_resource_id: str
:param select: Select properties to be returned.
:type select: list[str or ~education.models.Enum147]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEducationSubmissionResource, or the result of cls(response)
:rtype: ~education.models.MicrosoftGraphEducationSubmissionResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEducationSubmissionResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_submitted_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEducationSubmissionResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources/{educationSubmissionResource-id}'} # type: ignore
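    # Illustrative usage of the `cls` hook shared by these operations: it is
    # called as cls(pipeline_response, deserialized, headers) and its return
    # value replaces the default result — e.g. to also capture the HTTP
    # status code (`ops` and the ids are placeholders):
    #
    #   status_and_model = ops.get_submitted_resource(
    #       "<class-id>", "<assignment-id>", "<submission-id>", "<resource-id>",
    #       cls=lambda pip, obj, hdrs: (pip.http_response.status_code, obj),
    #   )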
def update_submitted_resource(
self,
education_class_id, # type: str
education_assignment_id, # type: str
education_submission_id, # type: str
education_submission_resource_id, # type: str
id=None, # type: Optional[str]
assignment_resource_url=None, # type: Optional[str]
display_name=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
last_modified_date_time=None, # type: Optional[datetime.datetime]
microsoft_graph_identity_id=None, # type: Optional[str]
microsoft_graph_identity_display_name=None, # type: Optional[str]
id1=None, # type: Optional[str]
display_name1=None, # type: Optional[str]
id2=None, # type: Optional[str]
display_name2=None, # type: Optional[str]
id3=None, # type: Optional[str]
display_name3=None, # type: Optional[str]
id4=None, # type: Optional[str]
display_name4=None, # type: Optional[str]
id5=None, # type: Optional[str]
display_name5=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property submittedResources in education.
Update the navigation property submittedResources in education.
:param education_class_id: key: educationClass-id of educationClass.
:type education_class_id: str
:param education_assignment_id: key: educationAssignment-id of educationAssignment.
:type education_assignment_id: str
:param education_submission_id: key: educationSubmission-id of educationSubmission.
:type education_submission_id: str
:param education_submission_resource_id: key: educationSubmissionResource-id of
educationSubmissionResource.
:type education_submission_resource_id: str
:param id: Read-only.
:type id: str
:param assignment_resource_url:
:type assignment_resource_url: str
:param display_name:
:type display_name: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param microsoft_graph_identity_id: Unique identifier for the identity.
:type microsoft_graph_identity_id: str
:param microsoft_graph_identity_display_name: The identity's display name. Note that this may
not always be available or up to date. For example, if a user changes their display name, the
API may show the new value in a future response, but the items associated with the user won't
show up as having changed when using delta.
:type microsoft_graph_identity_display_name: str
:param id1: Unique identifier for the identity.
:type id1: str
:param display_name1: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name1: str
:param id2: Unique identifier for the identity.
:type id2: str
:param display_name2: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name2: str
:param id3: Unique identifier for the identity.
:type id3: str
:param display_name3: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name3: str
:param id4: Unique identifier for the identity.
:type id4: str
:param display_name4: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name4: str
:param id5: Unique identifier for the identity.
:type id5: str
:param display_name5: The identity's display name. Note that this may not always be available
or up to date. For example, if a user changes their display name, the API may show the new
value in a future response, but the items associated with the user won't show up as having
changed when using delta.
:type display_name5: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphEducationSubmissionResource(id=id, assignment_resource_url=assignment_resource_url, display_name_resource_display_name=display_name, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, id_resource_last_modified_by_user_id=microsoft_graph_identity_id, display_name_resource_last_modified_by_user_display_name=microsoft_graph_identity_display_name, id_resource_last_modified_by_device_id=id1, display_name_resource_last_modified_by_device_display_name=display_name1, id_resource_last_modified_by_application_id=id2, display_name_resource_last_modified_by_application_display_name=display_name2, id_resource_created_by_user_id=id3, display_name_resource_created_by_user_display_name=display_name3, id_resource_created_by_device_id=id4, display_name_resource_created_by_device_display_name=display_name4, id_resource_created_by_application_id=id5, display_name_resource_created_by_application_display_name=display_name5)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_submitted_resource.metadata['url'] # type: ignore
path_format_arguments = {
'educationClass-id': self._serialize.url("education_class_id", education_class_id, 'str'),
'educationAssignment-id': self._serialize.url("education_assignment_id", education_assignment_id, 'str'),
'educationSubmission-id': self._serialize.url("education_submission_id", education_submission_id, 'str'),
'educationSubmissionResource-id': self._serialize.url("education_submission_resource_id", education_submission_resource_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphEducationSubmissionResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_submitted_resource.metadata = {'url': '/education/classes/{educationClass-id}/assignments/{educationAssignment-id}/submissions/{educationSubmission-id}/submittedResources/{educationSubmissionResource-id}'} # type: ignore
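    # Note: every operation seeds error_map with {404: ResourceNotFoundError,
    # 409: ResourceExistsError} and then merges any caller-supplied
    # `error_map` kwarg, so additional status codes can be mapped per call,
    # e.g. error_map={401: ClientAuthenticationError} (assuming that
    # exception is imported from azure.core.exceptions).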
# --- /FastSim/JUNO/reco/reco_test_v1.py (repo: wenxingfang/FastSim_ML) ---
import math
import yaml
import h5py
import json
import argparse
import numpy as np
from keras.models import model_from_json
from keras.models import model_from_yaml
from keras.models import load_model
from sklearn.utils import shuffle
def get_parser():
parser = argparse.ArgumentParser(
        description='Evaluate a trained regression model for muon '
                    'reconstruction on JUNO PMT data. Sensible defaults '
                    'come from [arXiv/1511.06434]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--batch-size', action='store', type=int, default=2,
help='batch size per update')
parser.add_argument('--disc-lr', action='store', type=float, default=2e-5,
help='Adam learning rate for discriminator')
parser.add_argument('--gen-lr', action='store', type=float, default=2e-4,
help='Adam learning rate for generator')
parser.add_argument('--adam-beta', action='store', type=float, default=0.5,
help='Adam beta_1 parameter')
parser.add_argument('--prog-bar', action='store_true',
help='Whether or not to use a progress bar')
parser.add_argument('--no-attn', action='store_true',
help='Whether to turn off the layer to layer attn.')
parser.add_argument('--debug', action='store_true',
help='Whether to run debug level logging')
parser.add_argument('--model-in', action='store',type=str,
default='',
help='input of trained reg model')
parser.add_argument('--weight-in', action='store',type=str,
default='',
help='input of trained reg weight')
parser.add_argument('--datafile', action='store', type=str,
help='yaml file with particles and HDF5 paths (see '
'github.com/hep-lbdl/CaloGAN/blob/master/models/'
'particles.yaml)')
parser.add_argument('--output', action='store',type=str,
default='',
help='output of result real vs reco')
return parser
if __name__ == '__main__':
parser = get_parser()
parse_args = parser.parse_args()
model = load_model(parse_args.model_in)
d = h5py.File(parse_args.datafile, 'r')
first = np.expand_dims(d['firstHitTimeByPMT'][:], -1)
second = np.expand_dims(d['nPEByPMT'][:], -1)
infoMuon = d['infoMuon'][:,:4]
d.close()
print('infoMuon dtype',infoMuon.dtype)
infoMuon = infoMuon.astype(float)
print('infoMuon dtype',infoMuon.dtype)
    ### normalize muon info: angles are scaled by 1/pi so targets lie in [-1, 1] ###
infoMuon[:,0]=(infoMuon[:,0])/math.pi
infoMuon[:,1]=(infoMuon[:,1])/math.pi
infoMuon[:,2]=(infoMuon[:,2])/math.pi
infoMuon[:,3]=(infoMuon[:,3])/math.pi
#infoMuon[:,4]=(infoMuon[:,4])/18000#17700.0
first, second, infoMuon = shuffle(first, second, infoMuon, random_state=0)
nBatch = int(first.shape[0]/parse_args.batch_size)
iBatch = np.random.randint(nBatch, size=1)
iBatch = iBatch[0]
input_first = first [iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
input_second = second[iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
result = model.predict([input_first, input_second], verbose=True)
ptheta = result[0]
pphi = result[1]
rtheta = result[2]
rphi = result[3]
print('ptheta:',ptheta[:10])
print('pphi:' ,pphi [:10])
print('rtheta:',rtheta[:10])
print('rphi:' ,rphi [:10])
result = np.concatenate((ptheta, pphi, rtheta, rphi),axis=-1)
real = infoMuon[iBatch*parse_args.batch_size:(iBatch+1)*parse_args.batch_size]
result1= model.test_on_batch([input_first, input_second],[real[:,0], real[:,1], real[:,2], real[:,3]])
result2= model.predict_on_batch([input_first, input_second])
print('test_on_batch:', result1)
print('predict_on_batch:', result2)
print('choose batch:', iBatch)
print('pred:\n',result)
print('real:\n',real)
print('diff:\n',result - real)
######### transfer to actual value #######
real[:,0] = real[:,0]*math.pi
real[:,1] = real[:,1]*math.pi
real[:,2] = real[:,2]*math.pi
real[:,3] = real[:,3]*math.pi
#real[:,4] = real[:,4]*18000
result[:,0] = result[:,0]*math.pi
result[:,1] = result[:,1]*math.pi
result[:,2] = result[:,2]*math.pi
result[:,3] = result[:,3]*math.pi
#result[:,4] = result[:,4]*18000
abs_diff = np.abs(result - real)
print('abs error:\n', abs_diff)
print('mean abs error:\n',np.mean(abs_diff, axis=0))
print('std abs error:\n',np.std (abs_diff, axis=0))
###### save ##########
hf = h5py.File(parse_args.output, 'w')
hf.create_dataset('input_info', data=real)
hf.create_dataset('reco_info' , data=result)
hf.close()
    print('Done')
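    # Downstream check (illustrative sketch): the saved arrays can be read back
    # with h5py, e.g.
    #   with h5py.File(parse_args.output, 'r') as f:
    #       real, reco = f['input_info'][:], f['reco_info'][:]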
| [
"[email protected]"
] | |
958e66963b687952abe54f34a00c7cef057ef540 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/Python-master/ciphers/hill_cipher.py | bc8f5b41b624ca389e5279c713516e329ed4da0f | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 7,549 | py | """
Hill Cipher:
The 'HillCipher' class below implements the Hill Cipher algorithm which uses
modern linear algebra techniques to encode and decode text using an encryption
key matrix.
Algorithm:
Let the order of the encryption key be N (as it is a square matrix).
Your text is divided into batches of length N and converted to numerical vectors
by a simple mapping starting with A=0 and so on.
The key is then multiplied with the newly created batch vector to obtain the
encoded vector. After each multiplication modular 36 calculations are performed
on the vectors so as to bring the numbers between 0 and 36 and then mapped with
their corresponding alphanumerics.
While decrypting, the decrypting key is found which is the inverse of the
encrypting key modular 36. The same process is repeated for decrypting to get
the original message back.
Constraints:
The determinant of the encryption key matrix must be relatively prime w.r.t 36.
Note:
This implementation only considers alphanumerics in the text. If the length of
the text to be encrypted is not a multiple of the break key(the length of one
batch of letters), the last character of the text is added to the text until the
length of the text reaches a multiple of the break_key. So the text after
decrypting might be a little different than the original text.
References:
https://apprendre-en-ligne.net/crypto/hill/Hillciph.pdf
https://www.youtube.com/watch?v=kfmNeskzs2o
https://www.youtube.com/watch?v=4RhLNDqcjpA
"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
"""
>>> greatest_common_divisor(4, 8)
4
>>> greatest_common_divisor(8, 4)
4
>>> greatest_common_divisor(4, 7)
1
>>> greatest_common_divisor(0, 10)
10
"""
return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
key_string = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
to_int = numpy.vectorize(lambda x: round(x))
def __init__(self, encrypt_key: numpy.ndarray) -> None:
"""
encrypt_key is an NxN numpy array
"""
self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
self.break_key = encrypt_key.shape[0]
def replace_letters(self, letter: str) -> int:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_letters('T')
19
>>> hill_cipher.replace_letters('0')
26
"""
return self.key_string.index(letter)
def replace_digits(self, num: int) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_digits(19)
'T'
>>> hill_cipher.replace_digits(26)
'0'
"""
return self.key_string[round(num)]
def check_determinant(self) -> None:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.check_determinant()
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
req_l = len(self.key_string)
if greatest_common_divisor(det, len(self.key_string)) != 1:
raise ValueError(
f"determinant modular {req_l} of encryption key({det}) is not co prime "
f"w.r.t {req_l}.\nTry another key."
)
def process_text(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.process_text('Testing Hill Cipher')
'TESTINGHILLCIPHERR'
>>> hill_cipher.process_text('hello')
'HELLOO'
"""
chars = [char for char in text.upper() if char in self.key_string]
last = chars[-1]
while len(chars) % self.break_key != 0:
chars.append(last)
return "".join(chars)
def encrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.encrypt('testing hill cipher')
'WHXYJOLM9C6XT085LL'
>>> hill_cipher.encrypt('hello')
'85FF00'
"""
text = self.process_text(text.upper())
encrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
0
]
encrypted_batch = "".join(
self.replace_digits(num) for num in batch_encrypted
)
encrypted += encrypted_batch
return encrypted
def make_decrypt_key(self) -> numpy.ndarray:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.make_decrypt_key()
array([[ 6, 25],
[ 5, 26]])
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
det_inv = None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
det_inv = i
break
inv_key = (
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(inv_key))
def decrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
'TESTINGHILLCIPHERR'
>>> hill_cipher.decrypt('85FF00')
'HELLOO'
"""
decrypt_key = self.make_decrypt_key()
text = self.process_text(text.upper())
decrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
decrypted_batch = "".join(
self.replace_digits(num) for num in batch_decrypted
)
decrypted += decrypted_batch
return decrypted
def main() -> None:
N = int(input("Enter the order of the encryption key: "))
hill_matrix = []
print("Enter each row of the encryption key with space separated integers")
for _ in range(N):
row = [int(x) for x in input().split()]
hill_matrix.append(row)
hc = HillCipher(numpy.array(hill_matrix))
print("Would you like to encrypt or decrypt some text? (1 or 2)")
option = input("\n1. Encrypt\n2. Decrypt\n")
if option == "1":
text_e = input("What text would you like to encrypt?: ")
print("Your encrypted text is:")
print(hc.encrypt(text_e))
elif option == "2":
text_d = input("What text would you like to decrypt?: ")
print("Your decrypted text is:")
print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
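# Quick round-trip check (sketch, mirroring the doctests above):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   assert hc.decrypt(hc.encrypt("testing hill cipher")) == "TESTINGHILLCIPHERR"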
| [
"[email protected]"
] | |
4e6831c00eea402266ff39cd2de8df9a3ff7de0f | a72cb4d00528fb3d2d47f99a1ccca1b8b9b41ff7 | /scripts/addons_extern/space_view3d_rotview.py | e9abc14ef5ebc45a9695ad927f5db3e218dce9c0 | [] | no_license | talocan/blenderpython | b05204881183ff901ec189916a3bcc1d3e9d3e20 | 056ac37e76a1b410696c9efe4fe0ea09fdc68c0e | refs/heads/master | 2021-01-18T05:16:47.221786 | 2014-07-11T17:01:53 | 2014-07-11T17:01:53 | 21,749,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,151 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__bpydoc__ = """\
The RotView addon serves the purpose of setting fixed rotation values for each of the
right/left/front/back/top/bottom views.
Documentation
First go to User Preferences->Addons and enable the RotView addon in the 3D View category.
To change the rotation in realtime first press one of the numerical keypad
view shortcuts to switch into a view and set the rotation
value with the slider (doubleclick for keyboard input) or use the <-90 and 90-> buttons to
switch to the next multiple of 90 degrees value. Button 0 goes back to zero rotation.
The rotation value of each of the views will be remembered when switching into it again from
the numerical keypad.
REMARK - when first enabling the addon, when in an affected view already, rotation will not work.
Enable the view again with numerical keypad shortcut.
REMARK - will not work when switching view through the View menu
"""
bl_info = {
"name": "RotView",
"author": "Gert De Roost",
"version": (0, 1, 7),
"blender": (2, 6, 3),
"location": "View3D > UI",
"description": "Set fixed view rotation values",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "3D View"}
if "bpy" in locals():
import imp
import bpy
from mathutils import *
import math
from time import sleep
activated = 0
frontrot = 0
backrot = 0
rightrot = 0
leftrot = 0
toprot = 0
bottomrot = 0
inview = 0
oldangle = 0
oldview = 0
adapt = 0
viewstring = ["", "FRONT", "BACK", "RIGHT", "LEFT", "TOP", "BOTTOM"]
viewnum = 1
bpy.types.Scene.Angle = bpy.props.FloatProperty(
name = "Rotation angle",
description = "Enter rotation angle",
default = 0,
min = -360,
max = 360)
class RotViewPanel(bpy.types.Panel):
bl_label = "RotView"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw_header(self, context):
global activated
layout = self.layout
if not(activated):
layout.operator("view3d.rotview", text="Activate")
def draw(self, context):
global frontrot, backrot, rightrot, leftrot, toprot, bottomrot, matrot
global oldangle, inview
scn = bpy.context.scene
layout = self.layout
if activated:
layout.label(viewstring[viewnum])
layout.prop(scn, "Angle")
row = layout.row()
row.operator("view.minus90",
text="<-90")
row.operator("view.nill",
text="0")
row.operator("view.plus90",
text="90->")
if viewnum == 1:
frontrot = scn.Angle
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 2:
backrot = scn.Angle
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 3:
rightrot = scn.Angle
matrotX = Matrix.Rotation(math.radians(scn.Angle), 3, 'X')
matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
elif viewnum == 4:
leftrot = scn.Angle
matrotX = Matrix.Rotation(math.radians(-scn.Angle), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
elif viewnum == 5:
toprot = scn.Angle
matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(scn.Angle), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 6:
bottomrot = scn.Angle
matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
if (inview == 1) and scn.Angle != oldangle:
bpy.context.space_data.region_3d.view_rotation = quat
# matrix = bpy.context.space_data.region_3d.view_matrix.to_3x3()
# matrix.rotate(matrot)
# bpy.context.space_data.region_3d.view_matrix = matrix.to_4x4()
# bpy.context.region.tag_redraw()
oldangle = scn.Angle
if inview == 2:
bpy.context.space_data.region_3d.view_rotation = quat
inview = 0
class Minus90(bpy.types.Operator):
bl_idname = "view.minus90"
bl_label = ""
bl_description = "To next 90 degrees multiple"
def invoke(self, context, event):
scn = bpy.context.scene
if (scn.Angle // 90) == (scn.Angle / 90):
if scn.Angle == -360:
scn.Angle = 270
else:
scn.Angle -= 90
else:
scn.Angle = (scn.Angle // 90) * 90
return {'FINISHED'}
class Plus90(bpy.types.Operator):
bl_idname = "view.plus90"
bl_label = ""
bl_description = "To previous 90 degrees multiple"
def invoke(self, context, event):
scn = bpy.context.scene
if scn.Angle == 360:
scn.Angle = -270
else:
scn.Angle = ((scn.Angle // 90) + 1) * 90
return {'FINISHED'}
class Nill(bpy.types.Operator):
bl_idname = "view.nill"
bl_label = ""
bl_description = "Set rotation to 0"
def invoke(self, context, event):
scn = bpy.context.scene
scn.Angle = 0
return {'FINISHED'}
class RotView(bpy.types.Operator):
bl_idname = "view3d.rotview"
bl_label = "RotView"
bl_description = "Set fixed view rotation values"
bl_options = {"REGISTER"}
def invoke(self, context, event):
global activated
activated = 1
do_rotview(self)
context.window_manager.modal_handler_add(self)
self._handle = context.region.callback_add(redraw, (), 'POST_VIEW')
return {'RUNNING_MODAL'}
def modal(self, context, event):
global frontrot, backrot, rightrot, leftrot, toprot, bottomrot
global quat
global inview, adapt, viewnum
scn = bpy.context.scene
if event.type == "TIMER":
bpy.context.window_manager.event_timer_remove(timer)
bpy.context.space_data.region_3d.view_rotation = quat
bpy.context.region.tag_redraw()
if event.type == "MIDDLEMOUSE":
inview = 0
if viewnum == 1:
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 2:
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 3:
matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
elif viewnum == 4:
matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
elif viewnum == 5:
matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
elif viewnum == 6:
matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
bpy.context.space_data.region_3d.view_rotation = quat
elif event.type == "NUMPAD_1":
if event.value == "RELEASE":
if not(event.ctrl):
viewnum = 1
scn.Angle = frontrot
adapt = 1
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(0), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
inview = 1
bpy.context.region.tag_redraw()
else:
viewnum = 2
scn.Angle = backrot
adapt = 1
matrotX = Matrix.Rotation(math.radians(90), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(180), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
inview = 1
bpy.context.region.tag_redraw()
return {"RUNNING_MODAL"}
return {"PASS_THROUGH"}
elif event.type == "NUMPAD_3":
if event.value == "RELEASE":
if not(event.ctrl):
viewnum = 3
scn.Angle = rightrot
adapt = 1
matrotX = Matrix.Rotation(math.radians(scn.Angle), 3, 'X')
matrotY = Matrix.Rotation(math.radians(90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
inview = 1
bpy.context.region.tag_redraw()
else:
viewnum = 4
scn.Angle = leftrot
adapt = 1
matrotX = Matrix.Rotation(math.radians(-scn.Angle), 3, 'X')
matrotY = Matrix.Rotation(math.radians(-90), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(-90), 3, 'Z')
quat = matrotZ.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotX)
inview = 1
bpy.context.region.tag_redraw()
return {"RUNNING_MODAL"}
return {"PASS_THROUGH"}
elif event.type == "NUMPAD_7":
if event.value == "RELEASE":
if not(event.ctrl):
viewnum = 5
scn.Angle = toprot
adapt = 1
matrotX = Matrix.Rotation(math.radians(0), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(scn.Angle), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
inview = 1
bpy.context.region.tag_redraw()
else:
viewnum = 6
scn.Angle = bottomrot
adapt = 1
matrotX = Matrix.Rotation(math.radians(180), 3, 'X')
matrotY = Matrix.Rotation(math.radians(0), 3, 'Y')
matrotZ = Matrix.Rotation(math.radians(-scn.Angle), 3, 'Z')
quat = matrotX.to_quaternion()
quat.rotate(matrotY)
quat.rotate(matrotZ)
inview = 1
bpy.context.region.tag_redraw()
return {"RUNNING_MODAL"}
return {"PASS_THROUGH"}
return {"PASS_THROUGH"}
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
def do_rotview(self):
global regionui
for region in bpy.context.area.regions:
if region.type == "UI":
regionui = region
def redraw():
global adapt, timer
if adapt == 1:
adapt = 0
timer = bpy.context.window_manager.event_timer_add(0.1, bpy.context.window)
# bpy.context.region.tag_redraw()
# elif adapt == 2:
# sleep(0.1)
# adapt = 0
# bpy.context.space_data.region_3d.view_rotation = quat
| [
"[email protected]"
] | |
4f6b1dc211e4bc1cc3ec122d5bc8cba70661d87d | f11600b9a256bf6a2b584d127faddc27a0f0b474 | /normal/1401.py | 52bff56683cf923937222e17b79a7c0999757b14 | [] | no_license | longhao54/leetcode | 9c1f0ce4ca505ec33640dd9b334bae906acd2db5 | d156c6a13c89727f80ed6244cae40574395ecf34 | refs/heads/master | 2022-10-24T07:40:47.242861 | 2022-10-20T08:50:52 | 2022-10-20T08:50:52 | 196,952,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | class Solution:
def checkOverlap(self, radius: int, x_center: int, y_center: int, x1: int, y1: int, x2: int, y2: int) -> bool:
        # Condition 1: first check whether the circle's center lies inside the rectangle
if x1 <= x_center <= x2 and y1 <= y_center <= y2:
return True
        # Condition 2: the center lies in one of the four regions to the
        # right of, below, left of, or above the rectangle
        elif x_center > x2 and y1 <= y_center <= y2:  # right
            return radius >= x_center - x2
        elif y_center < y1 and x1 <= x_center <= x2:  # below
            return radius >= y1 - y_center
        elif x_center < x1 and y1 <= y_center <= y2:  # left
            return radius >= x1 - x_center
        elif y_center > y2 and x1 <= x_center <= x2:  # above
            return radius >= y_center - y2
else:
            # Condition 3: check whether any of the rectangle's four corners lies inside the circle
return min((x1 - x_center) ** 2 + (y2 - y_center) ** 2,\
(x2 - x_center) ** 2 + (y2 - y_center) ** 2, \
(x2 - x_center) ** 2 + (y1 - y_center) ** 2, \
(x1 - x_center) ** 2 + (y1 - y_center) ** 2) <= radius ** 2
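# Example (LeetCode example 1): a unit circle centred at the origin touches the
# rectangle with corners (1, -1) and (3, 1) at the shared point (1, 0).
#   Solution().checkOverlap(1, 0, 0, 1, -1, 3, 1)  # -> True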
| [
"[email protected]"
] | |
af24b9455252c6b9b58c9672b4c0a8a22e0657eb | 334fafa9d87fdd13cc549a662a0cf35c47f2d5e3 | /backend/data/bahn/bahnscript/bahn2.py | 9738bfe0b9f26768f010e9a577d6262182208138 | [] | no_license | Jugendhackt/apartmapp | 27cd7c17e808a6948f017043e61ebd5480e87c89 | 0fded8e1da75a021547b53d68e88e9db524b088e | refs/heads/master | 2021-01-15T14:31:43.959035 | 2014-06-08T12:10:04 | 2014-06-08T12:10:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import json, psycopg2
filename = "plankgeo_data.json"
def trains(filename):
con = psycopg2.connect(database='appartmapp', user='postgres')
cur = con.cursor()
with open(filename, "r") as data:
json_encoded = json.loads(data.read())
data.close()
# print to screen
print json_encoded[0]['lat']
for entry in json_encoded:
cur.execute("INSERT INTO items(name, type, picture, lat, lng)VALUES(%s, 'dbstop', 'dbstop.jpg', %s, %s)", (entry['id'], entry['lat'], entry['lon']))
con.commit()
con.close()
trains(filename) | [
"[email protected]"
] | |
9a7032fb4a6c3a4c73af2c3f8c631ba5100585c7 | 638b207f3c7706cb0cb9dd1d6cf112ab91f69837 | /0x11-python-network_1/5-hbtn_header.py | c0286a8b9aaaf86889e63216152a5918919ad69c | [] | no_license | NasserAbuchaibe/holbertonschool-higher_level_programming | c30a066dfd4525e936b4121f930c3a63e6d911d6 | 5b0c11423e11bd9201cc057775c099eb0259f305 | refs/heads/master | 2022-12-16T17:15:57.775143 | 2020-09-25T03:00:56 | 2020-09-25T03:00:56 | 259,379,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/python3
""" Response header value
"""
import requests
from sys import argv
if __name__ == "__main__":
"""ok
"""
r = requests.get(argv[1])
print(r.headers.get('X-Request-Id'))
| [
"[email protected]"
] | |
d5da6927dd31fe7ad45d93dbfc11c2071edde7dc | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1_container_status.py | 1a90953d9050f84b6e1a3a816893dd05898df8f7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,938 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'container_id': 'str',
'image': 'str',
'image_id': 'str',
'last_state': 'V1ContainerState',
'name': 'str',
'ready': 'bool',
'restart_count': 'int',
'state': 'V1ContainerState'
}
attribute_map = {
'container_id': 'containerID',
'image': 'image',
'image_id': 'imageID',
'last_state': 'lastState',
'name': 'name',
'ready': 'ready',
'restart_count': 'restartCount',
'state': 'state'
}
def __init__(self, container_id=None, image=None, image_id=None, last_state=None, name=None, ready=None, restart_count=None, state=None):
"""
V1ContainerStatus - a model defined in Swagger
"""
self._container_id = None
self._image = None
self._image_id = None
self._last_state = None
self._name = None
self._ready = None
self._restart_count = None
self._state = None
self.discriminator = None
if container_id is not None:
self.container_id = container_id
self.image = image
self.image_id = image_id
if last_state is not None:
self.last_state = last_state
self.name = name
self.ready = ready
self.restart_count = restart_count
if state is not None:
self.state = state
@property
def container_id(self):
"""
Gets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:return: The container_id of this V1ContainerStatus.
:rtype: str
"""
return self._container_id
@container_id.setter
def container_id(self, container_id):
"""
Sets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:param container_id: The container_id of this V1ContainerStatus.
:type: str
"""
self._container_id = container_id
@property
def image(self):
"""
Gets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:return: The image of this V1ContainerStatus.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""
Sets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:param image: The image of this V1ContainerStatus.
:type: str
"""
if image is None:
raise ValueError("Invalid value for `image`, must not be `None`")
self._image = image
@property
def image_id(self):
"""
Gets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:return: The image_id of this V1ContainerStatus.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""
Sets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:param image_id: The image_id of this V1ContainerStatus.
:type: str
"""
if image_id is None:
raise ValueError("Invalid value for `image_id`, must not be `None`")
self._image_id = image_id
@property
def last_state(self):
"""
Gets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:return: The last_state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._last_state
@last_state.setter
def last_state(self, last_state):
"""
Sets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:param last_state: The last_state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._last_state = last_state
@property
def name(self):
"""
Gets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:return: The name of this V1ContainerStatus.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:param name: The name of this V1ContainerStatus.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def ready(self):
"""
Gets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:return: The ready of this V1ContainerStatus.
:rtype: bool
"""
return self._ready
@ready.setter
def ready(self, ready):
"""
Sets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:param ready: The ready of this V1ContainerStatus.
:type: bool
"""
if ready is None:
raise ValueError("Invalid value for `ready`, must not be `None`")
self._ready = ready
@property
def restart_count(self):
"""
Gets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:return: The restart_count of this V1ContainerStatus.
:rtype: int
"""
return self._restart_count
@restart_count.setter
def restart_count(self, restart_count):
"""
Sets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:param restart_count: The restart_count of this V1ContainerStatus.
:type: int
"""
if restart_count is None:
raise ValueError("Invalid value for `restart_count`, must not be `None`")
self._restart_count = restart_count
@property
def state(self):
"""
Gets the state of this V1ContainerStatus.
Details about the container's current condition.
:return: The state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this V1ContainerStatus.
Details about the container's current condition.
:param state: The state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
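# Minimal usage sketch (illustrative, not part of the generated client); the
# required fields are image, image_id, name, ready and restart_count:
#   status = V1ContainerStatus(image="nginx:1.14", image_id="docker://abc123",
#                              name="web", ready=True, restart_count=0)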
| [
"[email protected]"
] | |
684bb9b634cf46ead79b715049cf84129c8f2ed3 | 4bc25aaf986e481a533e22a7d74e963a18499593 | /Chapitre_5/visionneuse_1.py | 84b18735cbfc634e5f5e849a5feb1d38c636cf5d | [] | no_license | EditionsENI/python-raspberrypi-3e-edition | c5dd3be2cbc7e52793361f2a601b100011ea535d | f189aefc5ea0b265fd664c8a47dcde6cd110a8b0 | refs/heads/master | 2023-04-10T18:59:35.922958 | 2021-04-19T21:47:29 | 2021-04-19T21:47:29 | 317,060,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,780 | py | #!/usr/bin/env python3
import glob
import sys
import os
from tkinter import PhotoImage
from tkinter import Message
from tkinter import Button
from tkinter import Frame
from tkinter import Label
from tkinter import Tk
from tkinter import BOTTOM
from tkinter import LEFT
from tkinter import BOTH
from tkinter import YES
class PiVision(Tk):
def __init__(self, images):
Tk.__init__(self)
self.creer_composants()
if len(images) > 0:
            self.initialiser_images(images)
self.afficher_image()
else:
self.afficher_erreur()
self.mainloop()
    def initialiser_images(self, images):
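        # The images are chained into a circular doubly-linked list of VImage
        # nodes, so Next/Previous wrap around at both ends of the gallery.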
liste_image = [(PhotoImage(file=image), os.path.basename(image))
for image in sorted(images)]
premiere = derniere = VImage(info=liste_image.pop(0))
for image in liste_image:
derniere = derniere.ajout(info=image)
derniere.suivante = premiere
premiere.precedente = derniere
self.image_courante = premiere
def creer_composants(self):
self.composant_image = Label(self)
self.composant_image.pack(expand=YES, fill=BOTH)
self.bouton_frame = Frame(self)
self.bouton_frame.pack(side=BOTTOM)
        self.bouton_precedent = Button(
            self.bouton_frame, text="Previous", command=lambda: self.image_precedente())
        self.bouton_precedent.pack(side=LEFT)
        self.bouton_suivant = Button(
            self.bouton_frame, text="Next", command=lambda: self.image_suivante())
        self.bouton_suivant.pack(side=LEFT)
        self.bouton_fermer = Button(
            self.bouton_frame, text="Close", command=self.destroy)
self.bouton_fermer.pack(side=LEFT)
self.bind("<Left>", lambda ev: self.image_precedente())
self.bind("<Right>", lambda ev: self.image_suivante())
self.bind("<Escape>", lambda ev: self.destroy())
def image_suivante(self):
self.image_courante = self.image_courante.suivante
self.afficher_image()
def image_precedente(self):
self.image_courante = self.image_courante.precedente
self.afficher_image()
def afficher_image(self):
image, nom_image = self.image_courante.info
self.composant_image.config(image=image)
self.title("%s - %s " % (self.__class__.__name__, nom_image))
self.update()
def afficher_erreur(self):
self.bouton_precedent.configure(state="disable")
self.bouton_suivant.configure(state="disable")
self.unbind("<Left>")
self.unbind("<Right>")
self.erreur = Message(self.composant_image,
text="Aucune image n'a รฉtรฉ trouvรฉe !",
pady=25, padx=25, aspect=800)
self.erreur.config(font=("courier", 14, "bold"))
self.erreur.pack(expand=YES, fill=BOTH)
self.title("Erreur !")
self.update()
class VImage:
def __init__(self, info, suivante=None, precedente=None):
self.info = info
self.suivante = suivante
self.precedente = precedente
def ajout(self, info):
self.suivante = VImage(info, None, self)
return self.suivante
if __name__ == "__main__":
def usage(message=""):
print(message)
sys.exit(1)
if len(sys.argv) != 2:
usage("Veuillez indiquer un rรฉpertoire!")
repertoire = sys.argv[1]
if not os.path.isdir(repertoire):
usage(f"\"{repertoire}\" n'est pas un rรฉpertoire!")
extensions = "png jpg jpeg gif".split()
extensions = extensions + list(map(str.upper, extensions))
images = []
for ext in extensions:
images.append(glob.glob(f"{repertoire}/*.{ext}"))
images = sum(images, [])
PiVision(images)
| [
"[email protected]"
] | |
45b917165828401e96c1a8c3a7cfa1b5fae52fd8 | 4f7d2beed58fd3f484b1930ca3adeac406576d66 | /config/settings/mlsettings/pyTorchClassificationParams.py | 0f57469851642d8c24fec13f9b798fbbfeec8d2d | [] | no_license | Srinidhi-SA/mAdvisorStgAPIUI | 55515997fff1e30fe22f6a88cc222dcd51816031 | 75653e9f2eef51be771991edd6473f470b344110 | refs/heads/main | 2023-08-24T15:53:27.585277 | 2021-10-08T06:27:49 | 2021-10-08T06:27:49 | 343,185,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,760 | py | PT_ACTIVATION_ELU_PARAMS = [
{
"name": "alpha",
"displayName": "alpha",
"description": "the alpha value for the ELU formulation.",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Hardshrink_PARAMS = [
{
"name": "lambd",
"displayName": "lambd",
"description": "the lambda value for the Hardshrink formulation.",
"defaultValue": 0.5,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Hardtanh_PARAMS = [
{
"name": "min_val",
"displayName": "min_val",
"description": "minimum value of the linear region range.",
"defaultValue": -1,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "max_val",
"displayName": "max_val",
"description": "maximum value of the linear region range.",
"defaultValue": 1,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_LeakyReLU_PARAMS = [
{
"name": "negative_slope",
"displayName": "negative_slope",
"description": "Controls the angle of the negative slope.",
"defaultValue": 0.01,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_MultiheadAttention_PARAMS = [
{
"name": "embed_dim",
"displayName": "embed_dim",
"description": "total dimension of the model.",
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "num_heads",
"displayName": "num_heads",
"description": "parallel attention heads.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "dropout",
"displayName": "dropout",
"description": "a Dropout layer on attn_output_weights.",
"defaultValue": 0.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "bias",
"displayName": "bias",
"description": "",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "add_bias_kv",
"displayName": "add_bias_kv",
"description": "add bias to the key and value sequences at dim=0.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "add_zero_attn",
"displayName": "add_zero_attn",
"description": "add a new batch of zeros to the key and Value sequences at dim=1.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "kdim",
"displayName": "kdim",
"description": "total number of features in key.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "vdim",
"displayName": "vdim",
"description": "",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
}
]
PT_ACTIVATION_PreLU_PARAMS = [
{
"name": "num_parameters",
"displayName": "num_parameters",
"description": "number of alpha to learn.",
"defaultValue": [
{
"name": "1",
"selected": True,
"displayName": "1"
},
{
"name": "no of channels",
"selected": False,
"displayName": "No of Channels"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "init",
"displayName": "init",
"description": "the initial value of alpha.",
"defaultValue": 0.25,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_ACTIVATION_RreLU_PARAMS = [
{
"name": "lower",
"displayName": "lower",
"description": "lower bound of the uniform distribution.",
"defaultValue": 0.125,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "upper",
"displayName": "upper",
"description": "upper bound of the uniform distribution.",
"defaultValue": 0.33,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_CELU_PARAMS = [
{
"name": "alpha",
"displayName": "alpha",
"description": "the alpha value for the CELU formulation.",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Softplus_PARAMS = [
{
"name": "beta",
"displayName": "beta",
"description": "the beta value for the Softplus formulation.",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "threshold",
"displayName": "threshold",
"description": "values above this revert to a linear function.",
"defaultValue": 20,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Softshrink_PARAMS = [
{
"name": "lambd",
"displayName": "lambd",
"description": "the lambda value for the Softshrink formulation.",
"defaultValue": 0.5,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Threshold_PARAMS = [
{
"name": "threshold",
"displayName": "threshold",
"description": "The value to threshold at.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "value",
"displayName": "value",
"description": "The value to replace with.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_ACTIVATION_Softmin_PARAMS = [
{
"name": "dim",
"displayName": "dim",
"description": "A dimension along which Softmin will be computed.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
}
]
PT_ACTIVATION_Softmax_PARAMS = [
{
"name": "dim",
"displayName": "dim",
"description": "A dimension along which Softmax will be computed.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
}
]
PT_ACTIVATION_LogSoftmax_PARAMS = [
{
"name": "dim",
"displayName": "dim",
"description": "A dimension along which LogSoftmax will be computed.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
}
]
PT_ACTIVATION_AdaptiveLogSoftmaxWithLoss_PARAMS = [
{
"name": "n_classes",
"displayName": "n_classes",
"description": "Number of classes in the dataset.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "cutoffs",
"displayName": "cutoffs",
"description": "Cutoffs used to assign targets to their buckets.",
"defaultValue": None,
"paramType": "list",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "div_value",
"displayName": "div_value",
"description": "value used as an exponent to compute sizes of the clusters.",
"defaultValue": 4.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "head_bias",
"displayName": "head_bias",
"description": "If True, adds a bias term to the 'head' of the Adaptive softmax.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
}
]
PYTORCH_ACTIVATION_PARAMETERS = [
{"name": "ELU", "selected": False, "displayName": "ELU",
"parameters": [obj for obj in PT_ACTIVATION_ELU_PARAMS]},
{"name": "Hardshrink", "selected": False, "displayName": "Hardshrink",
"parameters": [obj for obj in PT_ACTIVATION_Hardshrink_PARAMS]},
{"name": "Hardtanh", "selected": False, "displayName": "Hardtanh",
"parameters": [obj for obj in PT_ACTIVATION_Hardtanh_PARAMS]},
{"name": "LeakyReLU", "selected": False, "displayName": "LeakyReLU",
"parameters": [obj for obj in PT_ACTIVATION_LeakyReLU_PARAMS]},
{"name": "LogSigmoid", "selected": False, "displayName": "LogSigmoid", "parameters": None},
{"name": "MultiheadAttention", "selected": False, "displayName": "MultiheadAttention",
"parameters": [obj for obj in PT_ACTIVATION_MultiheadAttention_PARAMS]},
{"name": "PreLU", "selected": False, "displayName": "PreLU",
"parameters": [obj for obj in PT_ACTIVATION_PreLU_PARAMS]},
{"name": "ReLU", "selected": False, "displayName": "ReLU", "parameters": None},
{"name": "ReLU6", "selected": False, "displayName": "ReLU6", "parameters": None},
{"name": "RreLU", "selected": False, "displayName": "RreLU",
"parameters": [obj for obj in PT_ACTIVATION_RreLU_PARAMS]},
{"name": "SELU", "selected": False, "displayName": "SELU", "parameters": None},
{"name": "CELU", "selected": False, "displayName": "CELU",
"parameters": [obj for obj in PT_ACTIVATION_CELU_PARAMS]},
{"name": "GELU", "selected": False, "displayName": "GELU", "parameters": None},
{"name": "Sigmoid", "selected": False, "displayName": "Sigmoid", "parameters": None},
{"name": "Softplus", "selected": False, "displayName": "Softplus",
"parameters": [obj for obj in PT_ACTIVATION_Softplus_PARAMS]},
{"name": "Softshrink", "selected": False, "displayName": "Softshrink",
"parameters": [obj for obj in PT_ACTIVATION_Softshrink_PARAMS]},
{"name": "Softsign", "selected": False, "displayName": "Softsign", "parameters": None},
{"name": "Tanh", "selected": False, "displayName": "Tanh", "parameters": None},
{"name": "Tanhshrink", "selected": False, "displayName": "Tanhshrink", "parameters": None},
{"name": "Threshold", "selected": False, "displayName": "Threshold",
"parameters": [obj for obj in PT_ACTIVATION_Threshold_PARAMS]},
{"name": "Softmin", "selected": False, "displayName": "Softmin",
"parameters": [obj for obj in PT_ACTIVATION_Softmin_PARAMS]},
{"name": "Softmax", "selected": False, "displayName": "Softmax",
"parameters": [obj for obj in PT_ACTIVATION_Softmax_PARAMS]},
{"name": "Softmax2d", "selected": False, "displayName": "Softmax2d", "parameters": None},
{"name": "LogSoftmax", "selected": False, "displayName": "LogSoftmax",
"parameters": [obj for obj in PT_ACTIVATION_LogSoftmax_PARAMS]},
{"name": "AdaptiveLogSoftmaxWithLoss", "selected": False, "displayName": "AdaptiveLogSoftmaxWithLoss",
"parameters": [obj for obj in PT_ACTIVATION_AdaptiveLogSoftmaxWithLoss_PARAMS]}
]
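# Illustrative sketch (commented out, not used at runtime): one way a consumer
# could map an entry of PYTORCH_ACTIVATION_PARAMETERS onto the matching
# torch.nn module. The helper name `build_activation` is hypothetical; it
# assumes the entry "name" matches a torch.nn class name and that scalar
# defaults are passed through as keyword arguments.
#
#   def build_activation(conf):
#       import torch.nn as nn
#       params = conf.get("parameters") or []
#       kwargs = {p["name"]: p["defaultValue"] for p in params
#                 if not isinstance(p.get("defaultValue", []), list)}
#       return getattr(nn, conf["name"])(**kwargs)
#
#   build_activation({"name": "ELU", "parameters": PT_ACTIVATION_ELU_PARAMS})
#   # -> nn.ELU(alpha=1.0)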
PT_DROPOUT_P_PARAMS = [
{
"name": "p",
"displayName": "p",
"description": "probability of an element to be dropped.",
"defaultValue": 0.5,
"paramType": "number",
"uiElemType": "slider",
"display": True,
"valueRange": [0, 1],
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PYTORCH_DROPOUT_PARAMETERS = [
{"name": "Dropout", "selected": False, "displayName": "Dropout",
"parameters": [obj for obj in PT_DROPOUT_P_PARAMS]}
]
PT_BATCHNORMALIZATION_BatchNorm1d_PARAMS = [
{
"name": "num_features",
"displayName": "num_features",
"description": "C from an expected input of size (N,C,L) or L from input of size (N, L).",
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "eps",
"displayName": "eps",
"description": "a value added to the denominator for numerical stability.",
"defaultValue": 0.00001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "momentum",
"displayName": "momentum",
"description": "the value used for the running_mean and running_var computation.",
"defaultValue": 0.1,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "affine",
"displayName": "affine",
"description": "a boolean value that when set to True, this module has learnable affine parameters, initialized the same way as done for batch normalization.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": True,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"valueRange": [0, 1],
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "track_running_stats",
"displayName": "track_running_stats",
"description": "a boolean value that when set to True, this module tracks the running mean and variance, and when set to False, this module does not track such statistics and always uses batch statistics in both training and eval modes.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": True,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"valueRange": [0, 1],
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
]
PYTORCH_BATCHNORMALIZATION_PARAMETERS = [
{"name": "BatchNorm1d", "selected": False, "displayName": "BatchNorm1d",
"parameters": [obj for obj in PT_BATCHNORMALIZATION_BatchNorm1d_PARAMS]}
]
PT_BIAS_INIT_Uniform_PARAMS = [
{
"name": "lower_bound",
"displayName": "lower bound",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 0.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "upper_bound",
"displayName": "upper bound",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_BIAS_INIT_Normal_PARAMS = [
{
"name": "mean",
"displayName": "mean",
"description": "Fills the input Tensor with values drawn from the normal distribution,N(mean,std^2)",
"defaultValue": 0.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "std",
"displayName": "std",
"description": "Fills the input Tensor with values drawn from the normal distribution,N(mean,std^2)",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_BIAS_INIT_Constant_PARAMS = [
{
"name": "val",
"displayName": "val",
"description": "Fills the input Tensor with the value {val}",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PYTORCH_BIAS_INIT_PARAMETERS = [
{"name": "Uniform", "selected": False, "displayName": "Uniform",
"parameters": [obj for obj in PT_BIAS_INIT_Uniform_PARAMS]},
{"name": "Normal", "selected": False, "displayName": "Normal",
"parameters": [obj for obj in PT_BIAS_INIT_Normal_PARAMS]},
{"name": "Constant", "selected": False, "displayName": "Constant",
"parameters": [obj for obj in PT_BIAS_INIT_Constant_PARAMS]},
{
"name": "Ones",
"displayName": "Ones",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Zeros",
"displayName": "Zeros",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Eyes",
"displayName": "Eyes",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Default",
"displayName": "Default",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Other",
"displayName": "Other",
"description": "Input Units parameter for the hidden layer.",
"selected": True,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
]
PT_WEIGHT_INIT_xavier_uniform_PARAMS = [
{
"name": "gain",
"displayName": "gain",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_WEIGHT_INIT_xavier_normal_PARAMS = [
{
"name": "gain",
"displayName": "gain",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_WEIGHT_INIT_Kaiming_Normal_PARAMS = [
{
"name": "a",
"displayName": "a",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 0.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "mode",
"displayName": "mode",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": [
{
"name": "fan_in",
"selected": True,
"displayName": "fan_in"
},
{
"name": "fan_out",
"selected": False,
"displayName": "fan_out"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "nonlinearity",
"displayName": "nonlinearity",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": [
{
"name": "leaky_relu",
"selected": True,
"displayName": "leaky_relu"
},
{
"name": "relu",
"selected": False,
"displayName": "relu"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
]
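# Sketch of the "Kaiming_Normal" parameters above applied through
# torch.nn.init (torch assumed installed; `w` is a hypothetical 2-D weight):
#
#   import torch
#   import torch.nn as nn
#   w = torch.empty(8, 16)
#   nn.init.kaiming_normal_(w, a=0.0, mode="fan_in", nonlinearity="leaky_relu")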
PT_WEIGHT_INIT_Orthogonal_PARAMS = [
{
"name": "gain",
"displayName": "gain",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 1.0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_WEIGHT_INIT_Sparse_PARAMS = [
{
"name": "sparsity",
"displayName": "sparsity",
"description": "Fills the input Tensor with values drawn from the uniform distribution U(lower_bound, upper_bound)",
"defaultValue": 0.5,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "std",
"displayName": "std",
"description": "Fills the input Tensor with values drawn from the normal distribution,N(mean,std^2)",
"defaultValue": 0.01,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
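# Sketch: nn.init.sparse_ zeroes a `sparsity` fraction of each column of a
# 2-D tensor and draws the remaining entries from N(0, std^2); torch assumed:
#
#   import torch
#   import torch.nn as nn
#   w = torch.empty(8, 16)
#   nn.init.sparse_(w, sparsity=0.5, std=0.01)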
PT_WEIGHT_CONSTRAINT_TRUE_PARAMS = [
{
"name": "min",
"displayName": "min",
"description": "minimum value.",
"defaultValue": 0.3,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "max",
"displayName": "max",
"description": "maximum value.",
"defaultValue": 0.7,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
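# Sketch of the min/max weight constraint above applied as an in-place clamp
# after each optimizer step (torch assumed; `model` is hypothetical):
#
#   with torch.no_grad():
#       for p in model.parameters():
#           p.clamp_(min=0.3, max=0.7)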
PYTORCH_WEIGHT_INIT_PARAMETERS = [
{"name": "Uniform", "selected": False, "displayName": "Uniform",
"parameters": [obj for obj in PT_BIAS_INIT_Uniform_PARAMS]},
{"name": "Normal", "selected": False, "displayName": "Normal",
"parameters": [obj for obj in PT_BIAS_INIT_Normal_PARAMS]},
{"name": "Constant", "selected": False, "displayName": "Constant",
"parameters": [obj for obj in PT_BIAS_INIT_Constant_PARAMS]},
{
"name": "Ones",
"displayName": "Ones",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Zeros",
"displayName": "Zeros",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Eyes",
"displayName": "Eyes",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "Dirac",
"displayName": "Dirac",
"description": "Input Units parameter for the hidden layer.",
"selected": False,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{"name": "Xavier_Uniform", "selected": False, "displayName": "Xavier Uniform",
"parameters": [obj for obj in PT_WEIGHT_INIT_xavier_uniform_PARAMS]},
{"name": "Xavier_Normal", "selected": False, "displayName": "Xavier Normal",
"parameters": [obj for obj in PT_WEIGHT_INIT_xavier_normal_PARAMS]},
{"name": "Kaiming_Normal", "selected": False, "displayName": "Kaiming Normal",
"parameters": [obj for obj in PT_WEIGHT_INIT_Kaiming_Normal_PARAMS]},
{"name": "Orthogonal", "selected": False, "displayName": "Orthogonal",
"parameters": [obj for obj in PT_WEIGHT_INIT_Orthogonal_PARAMS]},
{"name": "Sparse", "selected": False, "displayName": "Sparse",
"parameters": [obj for obj in PT_WEIGHT_INIT_Sparse_PARAMS]},
{"name": "Default", "selected": True, "displayName": "Default",
"parameters": None},
]
PT_WEIGHT_CONSTRAINT_CONSTRAINT_PARAMS = [
{
"name": "constraint",
"displayName": "constraint",
"description": "constraint",
"defaultValue": [
{
"name": "True",
"selected": False,
"displayName": "True",
"parameters": [PT_WEIGHT_CONSTRAINT_TRUE_PARAMS]
},
{
"name": "False",
"selected": True,
"displayName": "False"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
]
PYTORCH_WEIGHT_CONSTRAINT_PARAMETERS = [
{"name": "constraint", "selected": True, "displayName": "constraint",
"parameters": [obj for obj in PT_WEIGHT_CONSTRAINT_CONSTRAINT_PARAMS]},
]
PT_BIAS_PARAMS = [
{
"name": "bias_init",
"displayName": "bias_init",
"description": "Bias initialisation parameter for the hidden layer.",
"defaultValue": [obj for obj in PYTORCH_BIAS_INIT_PARAMETERS],
"paramType": "list",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
]
PYTORCH_LINEAR_PARAMETERS = [
{
"name": "activation",
"displayName": "Activation",
"description": "Activation function for the hidden layer.",
"defaultValue": [obj for obj in PYTORCH_ACTIVATION_PARAMETERS],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "dropout",
"displayName": "Dropout",
"description": "During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution.",
"defaultValue": [obj for obj in PYTORCH_DROPOUT_PARAMETERS],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "batchnormalization",
"displayName": "Batch Normalization",
"description": "Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) as described in the paper.",
"defaultValue": [obj for obj in PYTORCH_BATCHNORMALIZATION_PARAMETERS],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "units_ip",
"displayName": "Input Units",
"description": "Input Units parameter for the hidden layer.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "units_op",
"displayName": "Output Units",
"description": "Output Units parameter for the hidden layer.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "bias_init",
"displayName": "bias_init",
"description": "Bias initialisation parameter for the hidden layer.",
"defaultValue": [obj for obj in PYTORCH_BIAS_INIT_PARAMETERS],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "weight_init",
"displayName": "weight_init",
"description": "Weight initialisation parameter for the hidden layer.",
"defaultValue": [obj for obj in PYTORCH_WEIGHT_INIT_PARAMETERS],
"paramType": "list",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "weight_constraint",
"displayName": "weight constraint",
"description": "clipping the Weights.",
"defaultValue": [
{
"name": "True",
"selected": False,
"displayName": "True",
"parameters": [PT_WEIGHT_CONSTRAINT_TRUE_PARAMS]
},
{
"name": "False",
"selected": True,
"displayName": "False"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
]
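# Sketch of one hidden block assembled from the "Linear" layer parameters
# above (torch assumed; the 64/32 unit counts are hypothetical examples of
# units_ip / units_op):
#
#   import torch.nn as nn
#   block = nn.Sequential(
#       nn.Linear(in_features=64, out_features=32),  # units_ip -> units_op
#       nn.BatchNorm1d(32),                          # batchnormalization
#       nn.ReLU(),                                   # activation
#       nn.Dropout(p=0.5),                           # dropout
#   )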
SKLEARN_ML_SUPPORTED_PT_LAYER = [
{"name": "Linear", "selected": True, "displayName": "Linear",
"parameters": [obj for obj in PYTORCH_LINEAR_PARAMETERS]}
]
PT_OPTIMIZER_Adadelta_PARAMETERS = [
{
"name": "rho",
"displayName": "rho",
"description": "coefficient used for computing a running average of squared gradients.",
"defaultValue": 0.9,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.000001,
"paramType": "number",
"uiElemType": "textBox",
"valueRange": [0.000001, 1],
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "lr",
"displayName": "lr",
"description": "coefficient that scale delta before it is applied to the parameters.",
"defaultValue": 1.0,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
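# Sketch: constructing the optimizer from the Adadelta defaults above
# (torch assumed; `model` is hypothetical):
#
#   import torch
#   opt = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9,
#                              eps=1e-06, weight_decay=0)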
PT_OPTIMIZER_Adagrad_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.01,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "lr_decay",
"displayName": "lr_decay",
"description": " learning rate decay.",
"defaultValue": 0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.0000000001,
"valueRange": [0.0000000001, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_OPTIMIZER_Adam_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.001,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "betas",
"displayName": "betas",
"description": "coefficients used for computing running averages of gradient and its square.",
"defaultValue": [0.9, 0.999],
"valueRange": [[0.0, 1.0], [0.0, 1.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.00000001,
"valueRange": [0.0000000001, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "amsgrad",
"displayName": "amsgrad",
"description": "whether to use the AMSGrad variant of this algorithm from the paper.",
"defaultValue": [
{
"name": "false",
"selected": True,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
}
]
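# Sketch: the Adam defaults above map one-to-one onto torch.optim.Adam
# (torch assumed; `model` is hypothetical):
#
#   opt = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999),
#                          eps=1e-08, weight_decay=0, amsgrad=False)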
PT_OPTIMIZER_AdamW_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.001,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "betas",
"displayName": "betas",
"description": "coefficients used for computing running averages of gradient and its square.",
"defaultValue": [0.9, 0.999],
"valueRange": [[0.0, 1.0], [0.0, 1.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.00000001,
"valueRange": [0.00000001, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0.01,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "amsgrad",
"displayName": "amsgrad",
"description": "whether to use the AMSGrad variant of this algorithm from the paper.",
"defaultValue": [
{
"name": "false",
"selected": True,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
}
]
PT_OPTIMIZER_SparseAdam_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.001,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "betas",
"displayName": "betas",
"description": "coefficients used for computing running averages of gradient and its square.",
"defaultValue": [0.9, 0.999],
"valueRange": [[0.0, 1.0], [0.0, 1.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.00000001,
"valueRange": [0.00000001, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_OPTIMIZER_Adamax_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.001,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "betas",
"displayName": "betas",
"description": "coefficients used for computing running averages of gradient and its square.",
"defaultValue": [0.9, 0.999],
"valueRange": [[0.0, 1.0], [0.0, 1.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.00000001,
"valueRange": [0.00000001, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_OPTIMIZER_ASGD_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.01,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "lambd",
"displayName": "lambd",
"description": "decay term.",
"defaultValue": 0.0001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "alpha",
"displayName": "alpha",
"description": "power for eta update.",
"defaultValue": 0.75,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "t0",
"displayName": "t0",
"description": "point at which to start averaging.",
"defaultValue": 0.000001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0.0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_OPTIMIZER_LBFGS_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 1,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "max_iter",
"displayName": "max_iter",
"description": "maximal number of iterations per optimization step.",
"defaultValue": 20,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "max_eval",
"displayName": "max_eval",
"description": "maximal number of function evaluations per optimization step.",
"defaultValue": 25,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "tolerance_grad",
"displayName": "tolerance_grad",
"description": " termination tolerance on first order optimality.",
"defaultValue": 0.00001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "tolerance_change",
"displayName": "tolerance_change",
"description": "termination tolerance on function value/parameter changes.",
"defaultValue": 0.000000001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "history_size",
"displayName": "history_size",
"description": "update history size.",
"defaultValue": 100,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "line_search_fn",
"displayName": "line_search_fn",
"description": "either 'strong_wolfe' or None.",
"defaultValue": [
{
"name": "None",
"selected": True,
"displayName": "None"
},
{
"name": "strong_wolfe",
"selected": False,
"displayName": "strong_wolfe"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
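# Note: unlike the other optimizers listed here, torch.optim.LBFGS requires a
# closure that re-evaluates the loss on every step. Minimal sketch (torch
# assumed; `model`, `criterion`, `x`, `y` are hypothetical):
#
#   opt = torch.optim.LBFGS(model.parameters(), lr=1, max_iter=20)
#
#   def closure():
#       opt.zero_grad()
#       loss = criterion(model(x), y)
#       loss.backward()
#       return loss
#
#   opt.step(closure)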
PT_OPTIMIZER_RMSprop_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.01,
"valueRange": [0.0, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "momentum",
"displayName": "momentum",
"description": "momentum factor.",
"defaultValue": 0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "alpha",
"displayName": "alpha",
"description": "smoothing constant.",
"defaultValue": 0.99,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eps",
"displayName": "eps",
"description": "term added to the denominator to improve numerical stability.",
"defaultValue": 0.00000001,
"valueRange": [0.00000001, 1.0],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "centered",
"displayName": "centered",
"description": "if True, compute the centered RMSProp, the gradient is normalized By an estimation of its variance.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": True,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
PT_OPTIMIZER_Rprop_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.01,
"valueRange": [0, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "eta",
"displayName": "eta",
"description": "pair of (etaminus, etaplUs), that are multiplicative.",
"defaultValue": [0.5, 1.2],
"valueRange": [[0.0, 5.0], [0.0, 5.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "step_sizes",
"displayName": "step_sizes",
"description": "a pair of minimal and maximal allowed step sizes.",
"defaultValue": [0.000001, 50],
"valueRange": [[0.0, 5.0], [0.0, 5.0]],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
}
]
PT_OPTIMIZER_SGD_PARAMETERS = [
{
"name": "lr",
"displayName": "lr",
"description": "learning rate.",
"defaultValue": 0.1,
"valueRange": [0, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "momentum",
"displayName": "momentum",
"description": "momentum factor.",
"defaultValue": 0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "weight_decay",
"displayName": "weight_decay",
"description": "weight decay (L2 penalty).",
"defaultValue": 0,
"valueRange": [0, 0.1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "dampening",
"displayName": "dampening",
"description": "dampening for momentum.",
"defaultValue": 0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "nesterov",
"displayName": "nesterov",
"description": "enables Nesterov momentum.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
]
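# Sketch: SGD built from the parameters above; torch raises an error if
# nesterov=True is combined with momentum == 0 or dampening != 0 (torch
# assumed; `model` is hypothetical):
#
#   opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9,
#                         dampening=0, weight_decay=0, nesterov=True)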
SKLEARN_ML_SUPPORTED_PT_OPTIMIZER_PARAMETERS = [
{"name": "Adadelta", "selected": False, "displayName": "Adadelta",
"parameters": [obj for obj in PT_OPTIMIZER_Adadelta_PARAMETERS]},
{"name": "Adagrad", "selected": False, "displayName": "Adagrad",
"parameters": [obj for obj in PT_OPTIMIZER_Adagrad_PARAMETERS]},
{"name": "Adam", "selected": False, "displayName": "Adam",
"parameters": [obj for obj in PT_OPTIMIZER_Adam_PARAMETERS]},
{"name": "AdamW", "selected": False, "displayName": "AdamW",
"parameters": [obj for obj in PT_OPTIMIZER_AdamW_PARAMETERS]},
{"name": "SparseAdam", "selected": False, "displayName": "SparseAdam",
"parameters": [obj for obj in PT_OPTIMIZER_SparseAdam_PARAMETERS]},
{"name": "Adamax", "selected": False, "displayName": "Adamax",
"parameters": [obj for obj in PT_OPTIMIZER_Adamax_PARAMETERS]},
{"name": "ASGD", "selected": False, "displayName": "ASGD",
"parameters": [obj for obj in PT_OPTIMIZER_ASGD_PARAMETERS]},
{"name": "LBFGS", "selected": False, "displayName": "LBFGS",
"parameters": [obj for obj in PT_OPTIMIZER_LBFGS_PARAMETERS]},
{"name": "RMSprop", "selected": False, "displayName": "RMSprop",
"parameters": [obj for obj in PT_OPTIMIZER_RMSprop_PARAMETERS]},
{"name": "Rprop", "selected": False, "displayName": "Rprop",
"parameters": [obj for obj in PT_OPTIMIZER_Rprop_PARAMETERS]},
{"name": "SGD", "selected": False, "displayName": "SGD", "parameters": [obj for obj in PT_OPTIMIZER_SGD_PARAMETERS]}
]
PT_LOSS_CrossEntropyLoss_PARAMETERS = [
{
"name": "weight",
"displayName": "weight",
"description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
"paramType": "tensor",
"defaultValue": None,
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["tensor"],
"allowedDataType": ["tensor"]
},
{
"name": "ignore_index",
"displayName": "ignore_index",
"description": "Specifies a target value that is ignored and does not contribute to the input gradient.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
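# Sketch: CrossEntropyLoss constructed from the parameters above; note that
# torch's own default for ignore_index is -100 when it is left unset (torch
# assumed):
#
#   import torch.nn as nn
#   criterion = nn.CrossEntropyLoss(weight=None, ignore_index=-100,
#                                   reduction="mean")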
PT_LOSS_CTCLoss_PARAMETERS = [
{
"name": "blank",
"displayName": "blank",
"description": "blank label.",
"defaultValue": 0,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "zero_infinity",
"displayName": "zero_infinity",
"description": "Whether to zero infinite losses and the associated gradients.",
"defaultValue": [
{
"name": "false",
"selected": True,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
}
]
PT_LOSS_NLLLoss_PARAMETERS = [
{
"name": "weight",
"displayName": "weight",
"description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
"paramType": "tensor",
"defaultValue": None,
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["tensor"],
"allowedDataType": ["tensor"]
},
{
"name": "ignore_index",
"displayName": "ignore_index",
"description": "Specifies a target value that is ignored and does not contribute to the input gradient.",
"defaultValue": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
PT_LOSS_PoissonNLLLoss_PARAMETERS = [
{
"name": "log_input",
"displayName": "log_input",
"description": "if True the loss is computed as exp(input)-target*input.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "full",
"displayName": "full",
"description": "whether to compute full loss, i. e. to add the Stirling approximation term.",
"defaultValue": [
{
"name": "false",
"selected": False,
"displayName": "False"
},
{
"name": "true",
"selected": False,
"displayName": "True"
}
],
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["bool"],
"allowedDataType": ["bool"]
},
{
"name": "eps",
"displayName": "eps",
"description": "small value to avoid evaluation of log(0) when log_input = False.",
"defaultValue": 0.00000001,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
PT_LOSS_BCELoss_PARAMETERS = [
{
"name": "weight",
"displayName": "weight",
"description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
"paramType": "tensor",
"defaultValue": None,
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["tensor"],
"allowedDataType": ["tensor"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
PT_LOSS_BCEWithLogitsLoss_PARAMETERS = [
{
"name": "weight",
"displayName": "weight",
"description": "a manual rescaling weight given to each class. If given, has to be a Tensor of size C.",
"paramType": "tensor",
"defaultValue": None,
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["tensor"],
"allowedDataType": ["tensor"]
},
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "pos_weight",
"displayName": "pos_weight",
"description": "a weight of positive examples. Must be a vector with length equal to the number of classes.",
"defaultValue": "mean",
"paramType": "tensor",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["tensor"],
"allowedDataType": ["tensor"]
},
]
PT_LOSS_SoftMarginLoss_PARAMETERS = [
{
"name": "reduction",
"displayName": "reduction",
"description": "Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.",
"defaultValue": "mean",
"paramType": "list",
"valueRange": ["none", "mean", "sum"],
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
}
]
SKLEARN_ML_SUPPORTED_PT_LOSS_PARAMS = [
{"name": "CrossEntropyLoss", "selected": False, "displayName": "CrossEntropyLoss",
"parameters": [obj for obj in PT_LOSS_CrossEntropyLoss_PARAMETERS]},
{"name": "CTCLoss", "selected": False, "displayName": "CTCLoss",
"parameters": [obj for obj in PT_LOSS_CTCLoss_PARAMETERS]},
{"name": "NLLLoss", "selected": False, "displayName": "NLLLoss",
"parameters": [obj for obj in PT_LOSS_NLLLoss_PARAMETERS]},
{"name": "PoissonNLLLoss", "selected": False, "displayName": "PoissonNLLLoss",
"parameters": [obj for obj in PT_LOSS_PoissonNLLLoss_PARAMETERS]},
{"name": "BCELoss", "selected": False, "displayName": "BCELoss",
"parameters": [obj for obj in PT_LOSS_BCELoss_PARAMETERS]},
{"name": "BCEWithLogitsLoss", "selected": False, "displayName": "BCEWithLogitsLoss",
"parameters": [obj for obj in PT_LOSS_BCEWithLogitsLoss_PARAMETERS]},
{"name": "SoftMarginLoss", "selected": False, "displayName": "SoftMarginLoss",
"parameters": [obj for obj in PT_LOSS_SoftMarginLoss_PARAMETERS]}
]
SKLEARN_ML_SUPPORTED_PT_L1_REGULARIZER_PARAMETERS = [
{
"name": "l1_decay",
"selected": False,
"displayName": "l1_decay",
"description": "l1 decay.",
"defaultValue": 0.0,
"valueRange": [0, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
SKLEARN_ML_SUPPORTED_PT_L2_REGULARIZER_PARAMETERS = [
{
"name": "l2_decay",
"selected": False,
"displayName": "l2_decay",
"description": "l2 dacay.",
"defaultValue": 0.0,
"valueRange": [0, 1],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["float"],
"allowedDataType": ["float"]
},
]
SKLEARN_ML_SUPPORTED_PT_REGULARIZER_PARAMETERS = [
{"name": "l1_regularizer", "selected": False, "displayName": "l1_regularizer",
"parameters": [obj for obj in SKLEARN_ML_SUPPORTED_PT_L1_REGULARIZER_PARAMETERS]},
{"name": "l2_regularizer", "selected": False, "displayName": "l2_regularizer",
"parameters": [obj for obj in SKLEARN_ML_SUPPORTED_PT_L2_REGULARIZER_PARAMETERS]},
]
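# Sketch: l1_decay / l2_decay above applied as explicit penalty terms added
# to the loss inside the training loop (torch assumed; `model`, `loss`,
# `l1_decay`, `l2_decay` are hypothetical):
#
#   l1 = sum(p.abs().sum() for p in model.parameters())
#   l2 = sum((p ** 2).sum() for p in model.parameters())
#   loss = loss + l1_decay * l1 + l2_decay * l2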
SKLEARN_ML_PYTORCH_CLASSIFICATION_PARAMS = [
{
"name": "layer",
"displayName": "Layer",
"description": "A layer is a class implementing common Neural Networks Operations, such as convolution, batch norm, etc.",
"defaultValue": [obj for obj in SKLEARN_ML_SUPPORTED_PT_LAYER],
"acceptedValue": None,
"valueRange": None,
"paramType": "list",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "loss",
"displayName": "Loss",
"description": "The function used to evaluate the candidate solution (i.e. a set of weights).",
"defaultValue": [obj for obj in SKLEARN_ML_SUPPORTED_PT_LOSS_PARAMS],
"acceptedValue": None,
"valueRange": None,
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "optimizer",
"displayName": "Optimizer",
"description": "Method used to minimize the loss function.",
"defaultValue": [obj for obj in SKLEARN_ML_SUPPORTED_PT_OPTIMIZER_PARAMETERS],
"acceptedValue": None,
"valueRange": None,
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "regularizer",
"displayName": "regularizer",
"description": "Regularizer function.",
"defaultValue": [obj for obj in SKLEARN_ML_SUPPORTED_PT_REGULARIZER_PARAMETERS],
"acceptedValue": None,
"valueRange": None,
"paramType": "list",
"uiElemType": "checkbox",
"display": True,
"hyperpatameterTuningCandidate": True,
"expectedDataType": ["string"],
"allowedDataType": ["string"]
},
{
"name": "batch_size",
"displayName": "Batch Size",
"description": "The number of training examples in one Forward/Backward Pass.",
"defaultValue": 0,
"acceptedValue": None,
"valueRange": [0, 100],
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": False,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
},
{
"name": "number_of_epochs",
"displayName": "Number of Epochs",
"description": "An epoch refers to one cycle through the full training data-set.",
"defaultValue": 100,
"acceptedValue": None,
"valueRange": None,
"paramType": "number",
"uiElemType": "textBox",
"display": True,
"hyperpatameterTuningCandidate": False,
"expectedDataType": ["int"],
"allowedDataType": ["int"]
}
]
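# Sketch: reading one flat default back out of the nested structure above,
# e.g. the configured number of epochs (pure Python, no extra dependencies):
#
#   epochs = next(p["defaultValue"]
#                 for p in SKLEARN_ML_PYTORCH_CLASSIFICATION_PARAMS
#                 if p["name"] == "number_of_epochs")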
| [
"[email protected]"
] | |
a29bbe7d2c8cb74beba2552e92cb4abc19df3926 | b773ff8595421fb743e55f7bc0190791f2ece7a2 | /backend/home/migrations/0002_load_initial_data.py | eb446cfe1a0a9a986553ca35b2a5b469e122a3f3 | [] | no_license | crowdbotics-apps/msm-tc208-fzjohztpg-12768 | d68746372f604aa5ec805c7c4c480eb451d2b96d | 016bfac5d6497dbd88b49eddc4b8f74788161c83 | refs/heads/master | 2022-12-28T04:01:12.567205 | 2020-10-06T05:28:05 | 2020-10-06T05:28:05 | 301,622,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "MSM-TC208-fzjohztpgt"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">MSM-TC208-fzjohztpgt</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "msm-tc208-fzjohztpg-12768.botics.co"
site_params = {
"name": "MSM-TC208-fzjohztpgt",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
144f59685bb10c3354166f6418c4dafff8ef54e3 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ospf/actxpol.py | eb68c9228b8ae67d290caf769569822d2309626a | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 10,769 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ACtxPol(Mo):
meta = ClassMeta("cobra.model.ospf.ACtxPol")
meta.isAbstract = True
meta.moClassName = "ospfACtxPol"
meta.moClassName = "ospfACtxPol"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstraction of OSPF Context Policy"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.fabric.L3CtxPol")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoDomPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fabric.L3DomPol")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDef")
meta.concreteSubClasses.add("cobra.model.ospf.CtxDefAf")
meta.concreteSubClasses.add("cobra.model.ospf.CtxPol")
meta.rnPrefixes = [
]
prop = PropMeta("str", "bwRef", "bwRef", 1089, PropCategory.REGULAR)
prop.label = "Bandwidth Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4000000)]
prop.defaultValue = 40000
prop.defaultValueStr = "40000"
meta.props.add("bwRef", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "ctrl", "ctrl", 22755, PropCategory.REGULAR)
prop.label = "Control knobs"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop._addConstant("name-lookup", "enable-name-lookup-for-router-ids", 2)
prop._addConstant("pfx-suppress", "prefix-suppression", 1)
meta.props.add("ctrl", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dist", "dist", 1087, PropCategory.REGULAR)
prop.label = "Distance Preference"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 255)]
prop.defaultValue = 110
prop.defaultValueStr = "110"
meta.props.add("dist", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "grCtrl", "grCtrl", 1098, PropCategory.REGULAR)
prop.label = "Graceful Restart Controls"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "helper"
prop._addConstant("helper", "graceful-restart-helper", 1)
meta.props.add("grCtrl", prop)
prop = PropMeta("str", "lsaArrivalIntvl", "lsaArrivalIntvl", 1094, PropCategory.REGULAR)
prop.label = "Min Arrival Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(10, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("lsaArrivalIntvl", prop)
prop = PropMeta("str", "lsaGpPacingIntvl", "lsaGpPacingIntvl", 1093, PropCategory.REGULAR)
prop.label = "Pacing Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1800)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("lsaGpPacingIntvl", prop)
prop = PropMeta("str", "lsaHoldIntvl", "lsaHoldIntvl", 1096, PropCategory.REGULAR)
prop.label = "Throttle Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaHoldIntvl", prop)
prop = PropMeta("str", "lsaMaxIntvl", "lsaMaxIntvl", 1097, PropCategory.REGULAR)
prop.label = "Throttle Max Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(50, 30000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("lsaMaxIntvl", prop)
prop = PropMeta("str", "lsaStartIntvl", "lsaStartIntvl", 1095, PropCategory.REGULAR)
prop.label = "Throttle Start Wait Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 5000)]
prop.defaultValue = 0
prop.defaultValueStr = "0"
meta.props.add("lsaStartIntvl", prop)
prop = PropMeta("str", "maxEcmp", "maxEcmp", 1088, PropCategory.REGULAR)
prop.label = "Max ECMP"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 64)]
prop.defaultValue = 8
prop.defaultValueStr = "8"
meta.props.add("maxEcmp", prop)
prop = PropMeta("str", "maxLsaAction", "maxLsaAction", 17808, PropCategory.REGULAR)
prop.label = "Action"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "reject"
prop._addConstant("log", "log", 2)
prop._addConstant("reject", "reject", 0)
prop._addConstant("restart", "restart", 1)
meta.props.add("maxLsaAction", prop)
prop = PropMeta("str", "maxLsaNum", "maxLsaNum", 17803, PropCategory.REGULAR)
prop.label = "Maximum # of non self-generated LSAs"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 20000
prop.defaultValueStr = "20000"
meta.props.add("maxLsaNum", prop)
prop = PropMeta("str", "maxLsaResetIntvl", "maxLsaResetIntvl", 17807, PropCategory.REGULAR)
prop.label = "Reset Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 10
prop.defaultValueStr = "10"
meta.props.add("maxLsaResetIntvl", prop)
prop = PropMeta("str", "maxLsaSleepCnt", "maxLsaSleepCnt", 17805, PropCategory.REGULAR)
prop.label = "Sleep Count"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepCnt", prop)
prop = PropMeta("str", "maxLsaSleepIntvl", "maxLsaSleepIntvl", 17806, PropCategory.REGULAR)
prop.label = "Sleep Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 1440)]
prop.defaultValue = 5
prop.defaultValueStr = "5"
meta.props.add("maxLsaSleepIntvl", prop)
prop = PropMeta("str", "maxLsaThresh", "maxLsaThresh", 17804, PropCategory.REGULAR)
prop.label = "Threshold"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 100)]
prop.defaultValue = 75
prop.defaultValueStr = "75"
meta.props.add("maxLsaThresh", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "spfHoldIntvl", "spfHoldIntvl", 1091, PropCategory.REGULAR)
prop.label = "Max Hold Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 1000
prop.defaultValueStr = "1000"
meta.props.add("spfHoldIntvl", prop)
prop = PropMeta("str", "spfInitIntvl", "spfInitIntvl", 1090, PropCategory.REGULAR)
prop.label = "Initial Delay Interval"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 200
prop.defaultValueStr = "200"
meta.props.add("spfInitIntvl", prop)
prop = PropMeta("str", "spfMaxIntvl", "spfMaxIntvl", 1092, PropCategory.REGULAR)
prop.label = "Min Wait Time"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 600000)]
prop.defaultValue = 5000
prop.defaultValueStr = "5000"
meta.props.add("spfMaxIntvl", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
8f10397edc7ab65ffd5da0b053bac750e2a65976 | 552556631580799b16d0fb31e8f10850383ef3b2 | /ex3/outputs/hmmer/hmmer.DW_32-WS_384.out/info.py | ac221e133c77455ceb86e9ee7263f6d24f0a78b6 | [] | no_license | gregth/NTUA-advcomparch | f19ee414f8b77f749a09f263feb980350f88880d | bc501f427ddf1423f851ce1a052dc335183c5103 | refs/heads/master | 2022-11-14T20:11:49.035503 | 2020-06-27T09:17:43 | 2020-06-27T09:17:43 | 262,262,423 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,779 | py | power = {'BUSES': {'Area': 6.71959,
'Bus/Area': 6.71959,
'Bus/Gate Leakage': 0.0180267,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.187981,
'Bus/Subthreshold Leakage with power gating': 0.070493,
'Gate Leakage': 0.0180267,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.187981,
'Subthreshold Leakage with power gating': 0.070493},
'Core': [{'Area': 1204.13,
'Execution Unit/Area': 1132.45,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 4.60328,
'Execution Unit/Instruction Scheduler/Area': 1094.64,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.344008,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00151512,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.76857,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 1.20329,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0203725,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.0110394,
'Execution Unit/Instruction Scheduler/Gate Leakage': 4.20257,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 65.9732,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.340924,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 132.987,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 12.0751,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 5.16833,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 2.94537,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 49009.7,
'Execution Unit/Instruction Scheduler/ROB/Area': 1028.32,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 3.86013,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 48875.0,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 3945.62,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 45.9077,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 17.2608,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 3958.9,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 51.0964,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 20.2172,
'Execution Unit/Integer ALUs/Area': 3.76696,
'Execution Unit/Integer ALUs/Gate Leakage': 0.212233,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.840147,
'Execution Unit/Integer ALUs/Runtime Dynamic': 1.03156,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 3.21776,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 1.20666,
'Execution Unit/Peak Dynamic': 49013.1,
'Execution Unit/Register Files/Area': 28.3565,
'Execution Unit/Register Files/Floating Point RF/Area': 9.63068,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.00825445,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.193132,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.128663,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.0497709,
'Execution Unit/Register Files/Gate Leakage': 0.026159,
'Execution Unit/Register Files/Integer RF/Area': 18.7258,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.0179046,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 1.57228,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 1.60801,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.273097,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.103541,
'Execution Unit/Register Files/Peak Dynamic': 1.57228,
'Execution Unit/Register Files/Runtime Dynamic': 1.80115,
'Execution Unit/Register Files/Subthreshold Leakage': 0.40176,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.153311,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.436407,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.0569945,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.845842,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 22.6045,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.864121,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.324045,
'Execution Unit/Runtime Dynamic': 3984.84,
'Execution Unit/Subthreshold Leakage': 56.9352,
'Execution Unit/Subthreshold Leakage with power gating': 22.4095,
'Gate Leakage': 5.24531,
'Instruction Fetch Unit/Area': 21.8028,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00137507,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00137507,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118832,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000454898,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00340304,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0073415,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0135185,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.116748,
'Instruction Fetch Unit/Instruction Buffer/Area': 2.64509,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 0.0346434,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 497.007,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 23.3152,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.290984,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.110014,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.446139,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 14.8639,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 10.9923,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.773492,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 517.199,
'Instruction Fetch Unit/Runtime Dynamic': 24.5557,
'Instruction Fetch Unit/Subthreshold Leakage': 1.36137,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.5701,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0585066,
'L2/Runtime Dynamic': 0.0397313,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 9.12408,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 8.91832,
'Load Store Unit/Data Cache/Runtime Dynamic': 4.79319,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0582639,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.248505,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.319138,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 12.3086,
'Load Store Unit/Runtime Dynamic': 6.68621,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.612772,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.57388,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.730943,
'Load Store Unit/Subthreshold Leakage with power gating': 0.335652,
'Memory Management Unit/Area': 0.74897,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.217475,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.280163,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.0312611,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0731451,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 3.14649,
'Memory Management Unit/Runtime Dynamic': 0.353309,
'Memory Management Unit/Subthreshold Leakage': 0.216232,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0921915,
'Peak Dynamic': 56715.0,
'Renaming Unit/Area': 31.0758,
'Renaming Unit/FP Front End RAT/Area': 0.284555,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00465468,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 23.4847,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0482834,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0275216,
'Renaming Unit/Free List/Area': 8.22312,
'Renaming Unit/Free List/Gate Leakage': 0.00130004,
'Renaming Unit/Free List/Peak Dynamic': 7.12809,
'Renaming Unit/Free List/Runtime Dynamic': 1.00316,
'Renaming Unit/Free List/Subthreshold Leakage': 0.0311556,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.0167994,
'Renaming Unit/Gate Leakage': 0.296443,
'Renaming Unit/Int Front End RAT/Area': 22.2087,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.264049,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 7136.03,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 498.726,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 3.48901,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 1.98873,
'Renaming Unit/Peak Dynamic': 7169.17,
'Renaming Unit/Runtime Dynamic': 499.729,
'Renaming Unit/Subthreshold Leakage': 3.72773,
'Renaming Unit/Subthreshold Leakage with power gating': 2.09279,
'Runtime Dynamic': 4516.21,
'Subthreshold Leakage': 65.6835,
'Subthreshold Leakage with power gating': 26.6055}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 1.4122186516949031,
'Runtime Dynamic': 1.4122186516949031,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.0752572,
'Runtime Dynamic': 0.0809061,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 1272.76,
'Gate Leakage': 5.31175,
'Peak Dynamic': 56715.1,
'Peak Power': 56793.1,
'Runtime Dynamic': 4516.29,
'Subthreshold Leakage': 72.6723,
'Subthreshold Leakage with power gating': 30.5195,
'Total Cores/Area': 1204.13,
'Total Cores/Gate Leakage': 5.24531,
'Total Cores/Peak Dynamic': 56715.0,
'Total Cores/Runtime Dynamic': 4516.21,
'Total Cores/Subthreshold Leakage': 65.6835,
'Total Cores/Subthreshold Leakage with power gating': 26.6055,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.0752572,
'Total L3s/Runtime Dynamic': 0.0809061,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 77.9841,
'Total NoCs/Area': 6.71959,
'Total NoCs/Gate Leakage': 0.0180267,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.187981,
'Total NoCs/Subthreshold Leakage with power gating': 0.070493}} | [
"[email protected]"
] | |
efa2c847437a4664e7fb5a130f7dc6e093b737e4 | c115ba8fc9acc9bd2fd886a9507247e02e0c1035 | /_core/config/base.py | 772cce5b3eb76dbf2a447f35d3df4b365b7390fc | [] | no_license | ingafter60/completedjangoblog | bf368740f1171819689f231edd0e3ae4a61083de | df0f84630976d43e74d916dbf4a8cb4444176f7f | refs/heads/master | 2021-04-03T19:04:06.463307 | 2020-03-20T00:40:55 | 2020-03-20T00:40:55 | 248,388,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | """
Django settings for _core project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g%fyss+5dnhw7q*3do!y(kug#_#2@1$j^4(!k19iuzqx7zbb)w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# new
'ckeditor',
'apps.base',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '_core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '_core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
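# Illustrative note (not part of the original file): with this split-settings
# layout (_core/config/base.py), an environment-specific module would
# typically extend the base along these lines -- module name and host are
# hypothetical:
#   from .base import *  # noqa: F401,F403
#   DEBUG = False
#   ALLOWED_HOSTS = ['example.com']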
| [
"[email protected]"
] | |
7d5d3566f8081503f4981349ffda1b3bdbd985ce | 469456a0bf4c7c040a8136b111195a82de17cc6b | /rgb_module.py | 40e20d6431581e0f70174a91bfb45c03869c9133 | [] | no_license | 6reg/function_fun | d2e73387e78396618d074c97a2b0088647693b4e | 64bf2d6211f9bdcc030922a7f5292c37a31eddd9 | refs/heads/main | 2023-07-07T23:42:46.426225 | 2021-08-07T22:10:47 | 2021-08-07T22:10:47 | 370,854,414 | 0 | 1 | null | 2021-07-07T02:20:18 | 2021-05-25T23:39:07 | Python | UTF-8 | Python | false | false | 666 | py | DATA_FILE = "./rgb_values.txt"
def load_data():
    """Read DATA_FILE and build a mapping of color name -> list of RGB triples."""
    data = {}
    file = open(DATA_FILE)
    n_colors = 0
    for line in file:
        line = line.strip()
        if not line:  # guard against blank lines (e.g. a trailing newline)
            continue
        add_color(data, line)
        n_colors += 1
    print(len(data), n_colors)
    file.close()
    return data
def add_color(data, line):
    """Parse one 'name,r,g,b' line and append its RGB triple under the color's name."""
    parts = line.split(',')
    color_name = parts[0]
    color_rgb = color_from_line(line)
    if color_name not in data:
        data[color_name] = []
    data[color_name].append(color_rgb)
def color_from_line(line):
    """Return the [r, g, b] integer triple parsed from a 'name,r,g,b' line."""
    parts = line.split(',')
    r = int(parts[1])
    g = int(parts[2])
    b = int(parts[3])
    return [r, g, b]
load_data()
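# Illustrative input format (inferred from the parsing above; the data file
# itself is not included here): each line of rgb_values.txt is "name,r,g,b",
# for example:
#   red,255,0,0
#   cloudy blue,172,194,217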
| [
"[email protected]"
] | |
fe75aa03e43019e1066a7e638a69ca2612f323d6 | 91ca2b4215b74c3b3f2517baab9205997c9bcf62 | /maps/migrations/0009_auto_20180205_1945.py | 319e4b341f7faee495ba53495621ba57b938524c | [
"Apache-2.0"
] | permissive | swiftlabUAS/SwiftUTM | ae0ca347058563a040c24b740a5760187e507879 | caf40195b017ab980323cf88bf95e671e38a2676 | refs/heads/master | 2022-12-16T00:02:01.766221 | 2020-09-22T14:27:12 | 2020-09-22T14:27:12 | 297,254,220 | 0 | 1 | MIT | 2020-09-22T14:27:14 | 2020-09-21T06:55:30 | null | UTF-8 | Python | false | false | 487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-05 16:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0008_locationpoints_shortcode'),
]
operations = [
migrations.AlterField(
model_name='locationpoints',
name='shortcode',
field=models.CharField(blank=True, max_length=3, null=True),
),
]
| [
"[email protected]"
] | |
83a06f4dcd736336f0501d6bd4aa3a13a87113b8 | 7024d0fab7adee2937ddab28a2b69481ef76f9a8 | /llvm-archive/hlvm/build/bytecode.py | e21978e91a6f32943d5ac37306068592af7b29a3 | [] | no_license | WeilerWebServices/LLVM | 5b7927da69676a7c89fc612cfe54009852675450 | 1f138562730a55380ea3185c7aae565d7bc97a55 | refs/heads/master | 2022-12-22T09:05:27.803365 | 2020-09-24T02:50:38 | 2020-09-24T02:50:38 | 297,533,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | from SCons.Environment import Environment as Environment
import re,fileinput,os
from string import join as sjoin
from os.path import join as pjoin
def AsmFromBytecodeMessage(target,source,env):
return "Generating Native Assembly From LLVM Bytecode" + source[0].path
def AsmFromBytecodeAction(target,source,env):
theAction = env.Action(env['with_llc'] + ' -f -fast -o ' + target[0].path +
' ' + source[0].path)
env.Depends(target,env['with_llc'])
return env.Execute(theAction)
def BytecodeFromAsmMessage(target,source,env):
return "Generating Bytecode From LLVM Assembly " + source[0].path
def BytecodeFromAsmAction(target,source,env):
theAction = env.Action(env['with_llvmas'] +
' -f -o ' + target[0].path + ' ' + source[0].path + ' ' +
env['LLVMASFLAGS'])
env.Depends(target,env['with_llvmas'])
return env.Execute(theAction);
def BytecodeFromCppMessage(target,source,env):
return "Generating Bytecode From C++ Source " + source[0].path
def BytecodeFromCppAction(target,source,env):
includes = ""
for inc in env['CPPPATH']:
if inc[0] == '#':
inc = env['AbsSrcRoot'] + inc[1:]
includes += " -I" + inc
defines = ""
for d in env['CPPDEFINES'].keys():
if env['CPPDEFINES'][d] == None:
defines += " -D" + d
else:
defines += " -D" + d + "=" + env['CPPDEFINES'][d]
src = source[0].path
tgt = target[0].path
theAction = env.Action(
env['with_llvmgxx'] + ' $CXXFLAGS ' +
includes + defines + " -c -emit-llvm -g -O3 -x c++ " + src + " -o " + tgt )
env.Depends(target,env['with_llvmgxx'])
return env.Execute(theAction);
def BytecodeArchiveMessage(target,source,env):
return "Generating Bytecode Archive From Bytecode Modules"
def BytecodeArchiveAction(target,source,env):
sources = ''
for src in source:
sources += ' ' + src.path
theAction = env.Action(
env['with_llvmar'] + ' cr ' + target[0].path + sources)
env.Depends(target[0],env['with_llvmar'])
return env.Execute(theAction);
def Bytecode(env):
bc2s = env.Action(AsmFromBytecodeAction,AsmFromBytecodeMessage)
ll2bc = env.Action(BytecodeFromAsmAction,BytecodeFromAsmMessage)
cpp2bc = env.Action(BytecodeFromCppAction,BytecodeFromCppMessage)
arch = env.Action(BytecodeArchiveAction,BytecodeArchiveMessage)
bc2s_bldr = env.Builder(action=bc2s,suffix='s',src_suffix='bc',
single_source=1)
ll2bc_bldr = env.Builder(action=ll2bc,suffix='bc',src_suffix='ll',
single_source=1)
cpp2bc_bldr = env.Builder(action=cpp2bc,suffix='bc',src_suffix='cpp',
single_source=1)
arch_bldr = env.Builder(action=arch,suffix='bca',src_suffix='bc',
src_builder=[ cpp2bc_bldr,ll2bc_bldr])
env.Append(BUILDERS = {
'll2bc':ll2bc_bldr, 'cpp2bc':cpp2bc_bldr, 'bc2s':bc2s_bldr,
'BytecodeArchive':arch_bldr
})
return 1
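# Illustrative usage (an assumption, not from the original source): after
# loading this tool with Bytecode(env) in a SConscript, the registered
# builders can be invoked along these lines (file names are hypothetical):
#   env.BytecodeArchive('hlvm.bca', ['runtime.cpp', 'intrinsics.ll'])
#   env.bc2s('module.s', 'module.bc')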
| [
"[email protected]"
] | |
56f7c3b75781176c8c211e3d1a86345e6544e8be | ae74e9e03e9b8ba1d407bd5907d3fe197ce47a44 | /ggplot/04-graphs.py | 8d7d9c715bd502c503e9f71fb44de994503d773c | [] | no_license | dylanjorgensen/modules | 3f937298df6b1da0851cfbc4cbf6f046b81b303c | 9296284d3acdb0f899ad19f013fff4d73d0fcc0b | refs/heads/master | 2021-01-11T03:23:11.591989 | 2016-10-22T01:21:53 | 2016-10-22T01:21:53 | 71,014,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from ggplot import *
import pandas as pd
import numpy as np
# Generate data
df = pd.read_csv("baseball-pitches-clean.csv")
df = df[['pitch_time', 'inning', 'pitcher_name', 'hitter_name', 'pitch_type',
'px', 'pz', 'pitch_name', 'start_speed', 'end_speed', 'type_confidence']]
print df.head()
# print df.dtypes
# Strike "scatterplot"
# print ggplot(df, aes(x='px', y='pz')) + geom_point()
# Basic "histogram"
print ggplot(df, aes(x='start_speed')) + geom_histogram()
# Basic "facet wrap"
# print ggplot(aes(x='start_speed'), data=df) + geom_histogram() + facet_wrap('pitch_name')
# Basic "bar graph"
# print ggplot(aes(x='pitch_type'), data=df) + geom_bar()
# Basic "facet grid" # This lines up the grid for comparison
# print ggplot(aes(x='start_speed'), data=df) + geom_histogram() + facet_grid('pitch_type')
# Basic "geom density" # To compare various categorical frequency's in the same field
# print ggplot(df, aes(x='start_speed')) + geom_density()
# print ggplot(df, aes(x='start_speed', color='pitch_name')) + geom_density()
| [
"[email protected]"
] | |
6de8a8e58c7239a48d420c9f23e548175002d66e | 44eb40bf7bbd006f441b22d149dbb06eebe97506 | /src/chap05/gradient_check.py | f9819365dec71b653a1702cc28bc95aa3ac59923 | [] | no_license | hoonest/Deep_Learning | 56939f983c81e75b79d5474c11649dd57bf7107b | dd94f46ff886f20a47b09a54593e5fd2d53f0ed4 | refs/heads/master | 2020-04-19T22:52:03.640247 | 2019-02-19T03:34:16 | 2019-02-19T03:34:16 | 168,481,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make files in the parent directory importable
import numpy as np
from src.dataset.mnist import load_mnist
from src.chap05.two_layer_net import TwoLayerNet
# ๋ฐ์ดํฐ ์ฝ๊ธฐ
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = x_train[:3]
t_batch = t_train[:3]
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
# ๊ฐ ๊ฐ์ค์น์ ์ ๋ ์ค์ฐจ์ ํ๊ท ์ ๊ตฌํ๋ค.
for key in grad_numerical.keys():
diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
print(key + ":" + str(diff))
| [
"[email protected]"
] | |
beda1750d055f278c7feba99c51342ec22251e02 | 2d4e020e6ab48c46e0a19cb69048d9e8d26e46a6 | /Job_Portal/job_portal/main/migrations/0005_auto_20210202_0143.py | 94b50b03f39910c2733310db1be8d839c9c1ae73 | [] | no_license | IsmailTitas1815/Learning | a92476fcf7bcd28a7dc1ab2f4eb3a5c27034728f | 207eaf4101a6d161c1044310f4b3cc54e9c514eb | refs/heads/master | 2023-07-04T20:13:07.263331 | 2021-08-07T20:07:39 | 2021-08-07T20:07:39 | 293,100,950 | 0 | 0 | null | 2021-05-07T16:55:29 | 2020-09-05T15:18:46 | Python | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.1.5 on 2021-02-01 19:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20210202_0138'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='tag',
),
migrations.AddField(
model_name='candidate',
name='tag',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.tag'),
),
]
| [
"[email protected]"
] | |
cf08ddf5aa4fd4f0d5bbd4c4f17f8720aa26e1c0 | bb0f5ec6ee0ed99afb09087ff0ea9bfe32b7ea49 | /utills/amount.py | 87463e68aad88f653aba56ac9ab975e44a5ea3b3 | [] | no_license | persontianshuang/lottery_admin | 49f3386d75671d0b2c43dfea3987e7fa8df84c9b | d8ebc7cf778cac24055a709886fbaa3b03325a69 | refs/heads/master | 2021-09-03T10:04:40.151502 | 2018-01-08T08:09:50 | 2018-01-08T08:09:50 | 111,886,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | import random,time
from datetime import datetime
class AmountCommon():
    """Sums the `amount` field of a (Django-style) queryset over various time ranges."""
    def __init__(self, db):
        self.db = db
def sum_amount(self, db):
return sum([x.amount for x in db])
def all(self):
return str(self.sum_amount(self.db))
def today(self):
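        # midnight today (local time) as a Unix timestamp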
t = time.localtime(time.time())
time1 = time.mktime(time.strptime(time.strftime('%Y-%m-%d 00:00:00', t),
'%Y-%m-%d %H:%M:%S'))
today_zero = int(time1)
now = int(time.time())
tdb = self.db.filter(created__range=(today_zero, now))
return str(self.sum_amount(tdb))
def this_month(self):
d = datetime.now()
b = '{0}-{1}-1 00:00:00'.format(d.year, d.month)
month_zero = int(time.mktime(time.strptime(b, "%Y-%m-%d %H:%M:%S")))
now = int(time.time())
tdb = self.db.filter(created__range=(month_zero, now))
return str(self.sum_amount(tdb))
def time_formater(self,year,month,day):
b = '{0}-{1}-{2} 00:00:00'.format(year, month, day)
month_zero = int(time.mktime(time.strptime(b, "%Y-%m-%d %H:%M:%S")))
return month_zero
def time_range(self,last,now):
tlast = self.time_formater(last[0], last[1], last[2])
tnow = self.time_formater(now[0], now[1], now[2])
tdb = self.db.filter(created__range=(tlast, tnow))
return str(self.sum_amount(tdb)) | [
"[email protected]"
] | |
0d399b3c2d49ebf9b122a374ffd30ba15918ed1c | daeebdbbce15b25975f2fdca5ed43cde36be9e05 | /src/pipelinex/hatch_dict/hatch_dict.py | 410fe9aa5745baafc8af14b107003ae9a29384dd | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | shibuiwilliam/pipelinex | 4b3e7340c027dffba310785cb900c2c503877841 | 5bc4911c6b253cf29f9b6153d11be42a7d82dd48 | refs/heads/master | 2021-05-16T19:03:32.125740 | 2021-04-28T01:19:52 | 2021-04-28T01:19:52 | 363,591,360 | 1 | 0 | NOASSERTION | 2021-05-02T07:22:06 | 2021-05-02T07:22:06 | null | UTF-8 | Python | false | false | 9,894 | py | import importlib
from typing import Any, Union, List, Iterable # NOQA
from logging import getLogger
log = getLogger(__name__)
class HatchDict:
def __init__(
self,
egg, # type: Union[dict, List]
lookup={}, # type: dict
support_nested_keys=True, # type: bool
self_lookup_key="$", # type: str
support_import=True, # type: bool
additional_import_modules=["pipelinex"], # type: Union[List, str]
obj_key="=", # type: str
eval_parentheses=True, # type: bool
):
# type: (...) -> None
assert egg.__class__.__name__ in {"dict", "list"}
assert lookup.__class__.__name__ in {"dict"}
assert support_nested_keys.__class__.__name__ in {"bool"}
assert self_lookup_key.__class__.__name__ in {"str"}
assert additional_import_modules.__class__.__name__ in {"list", "str"}
assert obj_key.__class__.__name__ in {"str"}
aug_egg = {}
if isinstance(egg, dict):
if support_nested_keys:
aug_egg = dot_flatten(egg)
aug_egg.update(egg)
self.aug_egg = aug_egg
self.egg = egg
self.lookup = {}
self.lookup.update(_builtin_funcs())
self.lookup.update(lookup)
self.self_lookup_key = self_lookup_key
self.support_import = support_import
self.additional_import_modules = (
[additional_import_modules]
if isinstance(additional_import_modules, str)
else additional_import_modules or [__name__]
)
self.obj_key = obj_key
self.eval_parentheses = eval_parentheses
self.warmed_egg = None
self.snapshot = None
def get(
self,
key=None, # type: Union[str, int]
default=None, # type: Any
lookup={}, # type: dict
):
# type: (...) -> Any
assert (key is None) or (
key.__class__.__name__
in {
"str",
"int",
}
), "Received key: {}".format(key)
        assert lookup.__class__.__name__ in {"dict"}, "Received lookup: {}".format(
            lookup
        )
if key is None:
d = self.egg
else:
if isinstance(self.egg, dict):
d = self.aug_egg.get(key, default)
if isinstance(self.egg, list):
assert isinstance(key, int)
d = self.egg[key] if (0 <= key < len(self.egg)) else default
if self.self_lookup_key:
s = dict()
while d != s:
d, s = _dfs_apply(
d_input=d,
hatch_args=dict(lookup=self.aug_egg, obj_key=self.self_lookup_key),
)
self.warmed_egg = d
if self.eval_parentheses:
d, s = _dfs_apply(
d_input=d, hatch_args=dict(eval_parentheses=self.eval_parentheses)
)
self.warmed_egg = d
lookup_input = {}
lookup_input.update(self.lookup)
lookup_input.update(lookup)
if isinstance(self.egg, dict):
forcing_module = self.egg.get("FORCING_MODULE", "")
module_aliases = self.egg.get("MODULE_ALIASES", {})
for m in self.additional_import_modules:
d, s = _dfs_apply(
d_input=d,
hatch_args=dict(
lookup=lookup_input,
support_import=self.support_import,
default_module=m,
forcing_module=forcing_module,
module_aliases=module_aliases,
obj_key=self.obj_key,
),
)
self.snapshot = s
return d
def get_params(self):
return self.snapshot
def keys(self):
return self.egg.keys()
def items(self):
assert isinstance(self.egg, dict)
return [(k, self.get(k)) for k in self.egg.keys()]
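# Illustrative usage (an added note, not part of the original module) --
# assumes only the standard library:
#   HatchDict({"model": {"=": "collections.OrderedDict", "a": 1}}).get("model")
# resolves "collections.OrderedDict" via the "=" object key and calls it with
# a=1, returning OrderedDict([('a', 1)]).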
def _dfs_apply(
d_input, # type: Any
hatch_args, # type: dict
):
# type: (...) -> Any
eval_parentheses = hatch_args.get("eval_parentheses", False) # type: bool
lookup = hatch_args.get("lookup", dict()) # type: dict
support_import = hatch_args.get("support_import", False) # type: bool
default_module = hatch_args.get("default_module", "") # type: str
forcing_module = hatch_args.get("forcing_module", "") # type: str
module_aliases = hatch_args.get("module_aliases", {}) # type: dict
obj_key = hatch_args.get("obj_key", "=") # type: str
d = d_input
s = d_input
if isinstance(d_input, dict):
obj_str = d_input.get(obj_key)
d, s = {}, {}
for k, v in d_input.items():
d[k], s[k] = _dfs_apply(v, hatch_args)
if obj_str:
if obj_str in lookup:
a = lookup.get(obj_str)
d = _hatch(d, a, obj_key=obj_key)
elif support_import:
if forcing_module:
obj_path_list = obj_str.rsplit(".", 1)
obj_str = "{}.{}".format(forcing_module, obj_path_list[-1])
if module_aliases:
obj_path_list = obj_str.rsplit(".", 1)
if len(obj_path_list) == 2 and obj_path_list[0] in module_aliases:
module_alias = module_aliases.get(obj_path_list[0])
if module_alias is None:
obj_path_list.pop(0)
else:
obj_path_list[0] = module_alias
obj_str = ".".join(obj_path_list)
a = load_obj(obj_str, default_obj_path=default_module)
d = _hatch(d, a, obj_key=obj_key)
if isinstance(d_input, list):
d, s = [], []
for v in d_input:
_d, _s = _dfs_apply(v, hatch_args)
d.append(_d)
s.append(_s)
if isinstance(d_input, str):
if (
eval_parentheses
and len(d_input) >= 2
and d_input[0] == "("
and d_input[-1] == ")"
):
d = eval(d)
return d, s
def _hatch(
d, # type: dict
a, # type: Any
obj_key="=", # type: str
pos_arg_key="_", # type: str
attr_key=".", # type: str
):
d.pop(obj_key)
if d:
assert callable(a), "{} is not callable.".format(a)
pos_args = d.pop(pos_arg_key, None)
if pos_args is None:
pos_args = []
if not isinstance(pos_args, list):
pos_args = [pos_args]
attribute_name = d.pop(attr_key, None)
for k in d:
assert isinstance(
k, str
), "Non-string key '{}' in '{}' is not valid for callable: '{}'.".format(
k, d, a.__name__
)
d = a(*pos_args, **d)
if attribute_name:
d = getattr(d, attribute_name)
# if isinstance(d, MethodType):
# d = lambda *args: d(args[0])
else:
d = a
return d
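# Illustrative (based on the keys above): with obj_key "=", pos_arg_key "_",
# and attr_key ".", a dict such as
#   {"=": "foo", "_": [1, 2], "bar": 3}
# resolves `foo` (here hypothetical) via lookup/import and calls foo(1, 2, bar=3).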
def dot_flatten(d):
try:
from flatten_dict import flatten
d = flatten(d, reducer="dot")
except Exception:
log.warning("{} failed to be flattened.".format(d), exc_info=True)
return d
def pass_(*argsignore, **kwargsignore):
return None
def pass_through(*args, **kwargs):
return args[0] if args else list(kwargs.values())[0] if kwargs else None
class ToPipeline:
def __init__(self, *args):
if len(args) == 1:
args = args[0]
self.args = args
def __call__(self):
return self.args
class Construct:
def __init__(self, obj):
self.obj = obj
def __call__(self, *args, **kwargs):
return self.obj(*args, **kwargs)
class Method:
method = None
def __init__(self, *args, **kwargs):
if self.method is None:
self.method = kwargs.pop("method")
self.args = args
self.kwargs = kwargs
def __call__(self, d):
if isinstance(d, dict):
d = HatchDict(d)
attr = getattr(d, self.method, None)
if callable(attr):
return attr(*self.args, **self.kwargs)
else:
return d
class Get(Method):
method = "get"
def feed(func, args):
assert callable(func)
if isinstance(args, dict):
posargs = args.pop("_", [])
kwargs = args
elif isinstance(args, (list, tuple)):
posargs = args
kwargs = dict()
else:
posargs = [args]
kwargs = dict()
def _feed(*argsignore, **kwargsignore):
return func(*posargs, **kwargs)
return _feed
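# Example (standard library only): feed(print, {"_": ["hello"]}) returns a
# callable that ignores its own arguments and prints "hello" when invoked.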
def _builtin_funcs():
return dict(
pass_=pass_,
pass_through=pass_through,
ToPipeline=ToPipeline,
Construct=Construct,
Method=Method,
Get=Get,
)
"""
Copyright 2018-2019 QuantumBlack Visual Analytics Limited
regarding `load_obj` function copied from
https://github.com/quantumblacklabs/kedro/blob/0.15.4/kedro/utils.py
"""
def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
"""Extract an object from a given path.
Args:
obj_path: Path to an object to be extracted, including the object name.
default_obj_path: Default object path.
Returns:
Extracted object.
Raises:
AttributeError: When the object does not have the given named attribute.
"""
obj_path_list = obj_path.rsplit(".", 1)
obj_path = obj_path_list.pop(0) if len(obj_path_list) > 1 else default_obj_path
obj_name = obj_path_list[0]
module_obj = importlib.import_module(obj_path)
if not hasattr(module_obj, obj_name):
raise AttributeError(
"Object `{}` cannot be loaded from `{}`.".format(obj_name, obj_path)
)
return getattr(module_obj, obj_name)
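# Example: load_obj("collections.OrderedDict") returns the OrderedDict class;
# load_obj("OrderedDict", default_obj_path="collections") is equivalent.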
| [
"[email protected]"
] | |
d0cca4222c8b6367b193a93bbb16784b03bdbf6d | 0bd3e809967ce2e02353a1c5559725bf3c9b6a7e | /update_bind_conf_gw.py | dc4273fb723c2b745ee9c0d667b3be3768301f8e | [] | no_license | ka-ba/backend-scripts | 79ea992852d4afaf24c1cd60146be1e3df06aa20 | 87ddce68d224a13f7062d8ec3825a46fb98fa343 | refs/heads/master | 2021-01-20T03:01:53.095776 | 2015-04-21T13:13:27 | 2015-04-21T13:13:27 | 27,978,828 | 0 | 0 | null | 2015-04-21T12:49:20 | 2014-12-14T00:50:09 | Python | UTF-8 | Python | false | false | 1,114 | py | #!/usr/bin/env python3
def update_bind_conf():
from photon.util.files import read_file
from common import pinit
photon, settings = pinit('update_bind_conf', verbose=True)
for repo in ['scripts', 'meta']:
photon.git_handler(
settings['icvpn']['icdns'][repo]['local'],
remote_url=settings['icvpn']['icdns'][repo]['remote']
)._pull()
bind_conf = photon.template_handler('${config_content}')
    config_content = photon.m(
        'generating bind conf',
        cmdd=dict(
            cmd='./mkdns -f bind -s %s -x mainz -x wiesbaden' % (settings['icvpn']['icdns']['meta']['local']),
            cwd=settings['icvpn']['icdns']['scripts']['local']
        )
    ).get('out')
bind_conf.sub = dict(config_content=config_content)
conf = settings['icvpn']['icdns']['conf']
if bind_conf.sub != read_file(conf):
bind_conf.write(conf, append=False)
photon.m(
'reloading bind daemon',
cmdd=dict(
cmd='sudo rndc reload'
)
)
if __name__ == '__main__':
update_bind_conf()
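# The settings loaded via pinit() are expected (per the lookups above) to
# provide keys along these lines -- the conf path shown is hypothetical:
#   icvpn:
#     icdns:
#       scripts: {local: ..., remote: ...}
#       meta:    {local: ..., remote: ...}
#       conf: /etc/bind/named.conf.icvpn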
| [
"[email protected]"
] |