blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 3-616) | content_id (string, 40-40) | detected_licenses (sequence, 0-112) | license_type (string, 2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3-10.2M) | extension (string, 188 classes) | content (string, 3-10.2M) | authors (sequence, 1-1) | author_id (string, 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2833dbab283205ed517a25347f6766087148e5cf | d565991b30a72837cd6f335c3ea9802a9b472467 | /Acoustic_Signal/test/SamplingRateConversion.py | 7dd39c8fabb129e810b88832a84ee768809b72c0 | [
"Apache-2.0",
"MIT"
] | permissive | philip-shen/note_python | c8ebab9731a2f8c40a2ab1ad4f0ca0cf4ab24f59 | ee6940486c557f9be2e6b967b28656e30c3598dd | refs/heads/master | 2023-08-09T12:05:14.974944 | 2023-08-06T03:16:07 | 2023-08-06T03:16:07 | 175,354,005 | 0 | 0 | MIT | 2023-02-16T06:47:10 | 2019-03-13T05:42:46 | Tcl | UTF-8 | Python | false | false | 5,721 | py | # -*- coding:utf-8 -*-
import numpy as np
import scipy.signal
import wave
import array
import struct
from scipy.io import wavfile
def wav_read(file_path, mmap=False):
"""
return sample value between range(-1,1)
Note" librosa.load use aioread, which may truncate the precision of the audio data to 16 bits.
:param file_path:
:param mmap: False read all data directly, True read data memory mapping
:return: samples ,fs
"""
fs, samples = wavfile.read(file_path, mmap=mmap)
# transfer samples from fixed to float
if samples.dtype == np.int16:
samples = np.array(samples, dtype=np.float32)
samples /= 2 ** 15
elif samples.dtype == np.float32:
samples = np.array(samples)
else:
raise NotImplementedError
return samples, fs
def wav_write(file_path, samples, fs, wav_type='int16'):
# scipy.io.wavfile.write cannot process np.float16 data
if wav_type == 'float32':
wavfile.write(file_path, fs, samples.astype(np.float32))
elif wav_type == 'int16':
output_samples = samples * (2 ** 15)
wav_type_iinfo = np.iinfo(wav_type)
output_samples.clip(min=wav_type_iinfo.min, max=wav_type_iinfo.max,
out=output_samples)
output_samples = output_samples.astype(wav_type)
wavfile.write(file_path, fs, output_samples)
else:
raise NotImplementedError
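
# Hedged round-trip sketch (added; the path arguments are placeholders, not
# from the original script): read a 16-bit wav as float32 in [-1, 1), then
# write it back as int16 using the two helpers above.
def roundtrip_example(in_path, out_path):
    samples, fs = wav_read(in_path)
    wav_write(out_path, samples, fs, wav_type='int16')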
def readWav(filename):
"""
wavファイルを読み込んで,データ・サンプリングレートを返す関数
"""
try:
wf = wave.open(filename)
fs = wf.getframerate()
# -1 ~ 1までに正規化した信号データを読み込む
data = np.frombuffer(wf.readframes(wf.getnframes()),dtype="int16")/32768.0
return (data,fs)
except Exception as e:
print(e)
exit()
def writeWav(filename,data,fs):
"""
入力されたファイル名でwavファイルを書き出す.
"""
# データを-32768から32767の整数値に変換
data = [int(x * 32767.0) for x in data]
#バイナリ化
binwave = struct.pack("h" * len(data), *data)
wf = wave.Wave_write(filename)
wf.setparams((
1, # channel
2, # byte width
fs, # sampling rate
len(data), # number of frames
"NONE", "not compressed" # no compression
))
wf.writeframes(binwave)
wf.close()
def upsampling(conversion_rate,data,fs):
"""
アップサンプリングを行う.
入力として,変換レートとデータとサンプリング周波数.
アップサンプリング後のデータとサンプリング周波数を返す.
"""
# 補間するサンプル数を決める
interpolationSampleNum = conversion_rate-1
# FIRフィルタの用意をする
nyqF = (fs*conversion_rate)/2.0 # 変換後のナイキスト周波数
cF = (fs/2.0-500.)/nyqF # カットオフ周波数を設定(変換前のナイキスト周波数より少し下を設定)
taps = 511 # フィルタ係数(奇数じゃないとだめ)
b = scipy.signal.firwin(taps, cF) # LPFを用意
# 補間処理
upData = []
for d in data:
upData.append(d)
# 1サンプルの後に,interpolationSampleNum分だけ0を追加する
for i in range(interpolationSampleNum):
upData.append(0.0)
# フィルタリング
resultData = scipy.signal.lfilter(b,1,upData)
return (resultData,fs*conversion_rate)
def downsampling(conversion_rate,data,fs):
"""
ダウンサンプリングを行う.
入力として,変換レートとデータとサンプリング周波数.
アップサンプリング後のデータとサンプリング周波数を返す.
"""
# 間引くサンプル数を決める
decimationSampleNum = conversion_rate-1
# FIRフィルタの用意をする
nyqF = (fs/conversion_rate)/2.0 # 変換後のナイキスト周波数
cF = (fs/conversion_rate/2.0-500.)/nyqF # カットオフ周波数を設定(変換前のナイキスト周波数より少し下を設定)
taps = 511 # フィルタ係数(奇数じゃないとだめ)
b = scipy.signal.firwin(taps, cF) # LPFを用意
#フィルタリング
data = scipy.signal.lfilter(b,1,data)
#間引き処理
downData = []
for i in range(0,len(data),decimationSampleNum+1):
downData.append(data[i])
return (downData,fs/conversion_rate)
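
# Hedged aside (added; not part of the original script): the zero-stuff + FIR
# pattern above is what polyphase resamplers implement internally, and scipy
# ships a one-call equivalent. The up/down rates below are illustrative.
def resample_poly_sketch(data, fs, up=2, down=1):
    resampled = scipy.signal.resample_poly(data, up, down)
    return resampled, fs * up // down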
FILENAME = "D:/project/FeqResp/Asus/asus_S54C_0807_Igo_Speech_FR_BandG/dut.wav"
#FILENAME = "../src_wav/3Quest_Standmic.wav"
if __name__ == "__main__":
    # set the upsampling factor
    up_conversion_rate = 2
    # set the downsampling factor; specify its reciprocal here (e.g. 2 for 1/2)
    down_conversion_rate = 2
    down_conversion_wave = "D:/project/FeqResp/Asus/asus_S54C_0807_Igo_Speech_FR_BandG/dut_16k.wav"
    # read the test wav file
#data,fs = readWav(FILENAME)
data,fs = wav_read(FILENAME)
    print('fs {}'.format(fs))
upData,upFs = upsampling(up_conversion_rate,data,fs)
downData,downFs = downsampling(down_conversion_rate,data,fs)
#writeWav("../src_wav/up.wav",upData,upFs)
writeWav(down_conversion_wave,downData,downFs)
#wav_write("../src_wav/up.wav",upFs,upData)
#wav_write("../src_wav/down.wav",downFs,downData)
| [
"[email protected]"
] | |
eed25826a0b13183199cd8198fbb81ba552fa0cb | cd0f3fa5c3b202599812ac8b49e374fe2b2f2e8b | /ExerciciosAprendizagem/CapApendices/ApBex02.py | 6aea5b29bb4f6629cb43b944a077056fb3ad0dcb | [] | no_license | J-AugustoManzano/livro_Python | 46c14dc4bc5fb361d850fcd361477a952de172c2 | e42b79ef78c6b1ab936fe9a13d32ddc94deeb2a8 | refs/heads/main | 2023-06-25T03:10:30.297226 | 2023-06-08T23:34:54 | 2023-06-08T23:34:54 | 354,116,051 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import const
const.pi = 3.14159
print(const.pi)
enter = input("\nPressione <Enter> para encerrar... ")
| [
"[email protected]"
] | |
1581702e30bf17521341ad3c8b02a00a78d7d427 | 86948735307c603936f22fc029349bf669ecaa6e | /ble-sensor-pi/sensortag/echoserverzal.py | f219034d6d6edca7ddc785107e3321c4558c534d | [
"Apache-2.0"
] | permissive | Kowo39/pythonPeter | 2b43fb1fc8ef16700be53e912b04b53117f745d5 | 629550a160445760cb95f6e9b58df5264e24a9e1 | refs/heads/master | 2021-01-10T20:26:41.463098 | 2015-07-21T15:38:58 | 2015-07-21T15:38:58 | 39,141,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | import socket
import sys
import time
import datetime
#create a TCP/IP Socket
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind the socket to the port
server_address = ('localhost', 10000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
#listen for incoming connections
sock.listen(1)
while True:
#wait for a connection
print >>sys.stderr, '\n\n\n\t\t*** A Server waiting for a connection ***'
connection, client_address = sock.accept()
ts = time.time()
try:
print >>sys.stderr, '\n - connection from', client_address
print " -",datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        #Receive data in small chunks and retransmit it
while True:
data = connection.recv(30)
            if data:
                print >>sys.stderr, ' - Data received from tag"%s"' %data
# print >>sys.stderr, 'sendinng data back to the client'
# connection.sendall(data)
else:
#print >>sys.stderr, '\nno more data my friend from', client_address
break
    finally:
        #Clean up the connection
        connection.close()
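
# Hedged companion sketch (commented out; not in the original file): a minimal
# Python 2 client this server would accept, mirroring server_address above.
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('localhost', 10000))
# client.sendall('temperature=23.5')
# client.close()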
| [
"root@raspberrypi.(none)"
] | root@raspberrypi.(none) |
56ed3798bc39ceb83d99ccb445df785a9b2636ee | 5785d7ed431b024dd910b642f10a6781df50e4aa | /.venv/lib/python3.8/site-packages/aws_okta_processor/commands/getroles.py | 784273d95ce8b03239ea7970f701411f6994ee96 | [] | no_license | kashyapa/interview-prep | 45d77324446da34d99bf8efedb3544b367b5523e | 7060c090c40602fb9c4778eace2078e1b51e235b | refs/heads/master | 2023-07-28T13:12:49.515299 | 2021-09-06T14:33:25 | 2021-09-06T14:33:25 | 403,706,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,102 | py | """
Usage: aws-okta-processor get-roles [options]
Options:
-h --help Show this screen.
--version Show version.
--no-okta-cache Do not read okta cache.
--no-aws-cache Do not read aws cache.
-e --environment Dump auth into ENV variables.
-u <user_name>, --user=<user_name> Okta user name.
-p <user_pass>, --pass=<user_pass> Okta user password.
-o <okta_organization>, --organization=<okta_organization> Okta organization domain.
-a <okta_application>, --application=<okta_application> Okta application url.
-r <role_name>, --role=<role_name> AWS role ARN.
-R <region_name>, --region=<region_name> AWS region name.
-A <account>, --account-alias=<account> AWS account alias filter (uses wildcards).
-d <duration_seconds> ,--duration=<duration_seconds> Duration of role session [default: 3600].
-k <key>, --key=<key> Key used for generating and accessing cache.
-f <factor>, --factor=<factor> Factor type for MFA.
-s --silent Run silently.
--target-shell <target_shell> Target shell to output the export command.
--output=<output> Output type (json, text, profiles) [default: json]
--output-format=<format> Format string for the output
[default: {account},{role}]
"""
from __future__ import print_function
import os
import json
import re
import sys
from .base import Base
from aws_okta_processor.core.fetcher import SAMLFetcher
from botocore.credentials import JSONFileCache
UNIX_EXPORT_STRING = ("export AWS_ACCESS_KEY_ID='{}' && "
"export AWS_SECRET_ACCESS_KEY='{}' && "
"export AWS_SESSION_TOKEN='{}'")
NT_EXPORT_STRING = ("$env:AWS_ACCESS_KEY_ID='{}'; "
"$env:AWS_SECRET_ACCESS_KEY='{}'; "
"$env:AWS_SESSION_TOKEN='{}'")
CONFIG_MAP = {
"--environment": "AWS_OKTA_ENVIRONMENT",
"--user": "AWS_OKTA_USER",
"--pass": "AWS_OKTA_PASS",
"--organization": "AWS_OKTA_ORGANIZATION",
"--application": "AWS_OKTA_APPLICATION",
"--role": "AWS_OKTA_ROLE",
"--duration": "AWS_OKTA_DURATION",
"--key": "AWS_OKTA_KEY",
"--factor": "AWS_OKTA_FACTOR",
"--silent": "AWS_OKTA_SILENT",
"--no-okta-cache": "AWS_OKTA_NO_OKTA_CACHE",
"--no-aws-cache": "AWS_OKTA_NO_AWS_CACHE",
"--output": "AWS_OKTA_OUTPUT",
"--output-format": "AWS_OKTA_OUTPUT_FORMAT"
}
class GetRoles(Base):
def get_accounts_and_roles(self):
cache = JSONFileCache()
saml_fetcher = SAMLFetcher(
self,
cache=cache
)
app_and_role = saml_fetcher.get_app_roles()
result_accounts = []
results = {
"application_url": app_and_role["Application"],
"accounts": result_accounts,
"user": app_and_role["User"],
"organization": app_and_role["Organization"],
}
accounts = app_and_role["Accounts"]
for name_raw in accounts:
account_parts = re.match(r"(Account:) ([a-zA-Z0-9-_]+) \(([0-9]+)\)", name_raw)
account = account_parts[2]
account_id = account_parts[3]
roles = accounts[name_raw]
result_roles = []
result_account = {
"name": account,
"id": account_id,
"name_raw": name_raw,
"roles": result_roles
}
result_accounts.append(result_account)
for role in roles:
role_suffix = role.split(os.environ.get("AWS_OKTA_ROLE_SUFFIX_DELIMITER", "-"))[-1]
result_roles.append({
"name": role,
"suffix": role_suffix
})
return results
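
    # Shape of the dict returned above (reconstructed from the code, shown for
    # reference): {"application_url": ..., "user": ..., "organization": ...,
    #   "accounts": [{"name", "id", "name_raw",
    #                 "roles": [{"name", "suffix"}]}]}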
def run(self):
accounts_and_roles = self.get_accounts_and_roles()
output = self.configuration.get("AWS_OKTA_OUTPUT", "json").lower()
if output == "json":
sys.stdout.write(json.dumps(accounts_and_roles))
else:
output_format = self.configuration.get("AWS_OKTA_OUTPUT_FORMAT", "{account},{role}")
if output == "profiles":
output_format = '\n[{account}-{role_suffix}]\ncredential_process=aws-okta-processor authenticate ' \
'--organization="{organization}" --user="{user}" --application="{application_url}" ' \
'--role="{role}" --key="{account}-{role}"'
formatted_roles = self.get_formatted_roles(accounts_and_roles, output_format)
for role in formatted_roles:
sys.stdout.write(role + "\n")
def get_formatted_roles(self, accounts_and_roles, output_format):
application_url = accounts_and_roles["application_url"]
accounts = accounts_and_roles["accounts"]
organization = accounts_and_roles["organization"]
user = accounts_and_roles["user"]
for account in accounts:
account_name = account["name"]
account_id = account["id"]
account_raw = account["name_raw"]
roles = account["roles"]
for role in roles:
yield output_format.format(
account=account_name,
account_id=account_id,
account_raw=account_raw,
role=role["name"],
organization=organization,
application_url=application_url,
user=user,
role_suffix=role["suffix"].lower()
)
def get_pass(self):
if self.configuration["AWS_OKTA_PASS"]:
return self.configuration["AWS_OKTA_PASS"]
def get_key_dict(self):
return {
"Organization": self.configuration["AWS_OKTA_ORGANIZATION"],
"User": self.configuration["AWS_OKTA_USER"],
"Key": self.configuration["AWS_OKTA_KEY"]
}
def get_configuration(self, options=None):
configuration = {}
for param, var in CONFIG_MAP.items():
if options.get(param, None):
configuration[var] = options[param]
if var not in configuration.keys():
if var in os.environ:
configuration[var] = os.environ[var]
else:
configuration[var] = None
return configuration
| [
"[email protected]"
] | |
03f970ba4bcdee92ff5b8b31f973424f87b081b6 | 2efd0540d7b05d1e56b625a92172b6aac0c9a48e | /Copa/venv/Scripts/pip3.6-script.py | a8aa2e13aee23f62ade69cbd0b144de241c157b8 | [] | no_license | EricKurachi/algoritmo_genetico | f40003381fc0baba91db2e42437e59f33bb0ae5c | 46659dbfcdd6a93f66f2e8dff4c920d6300c1c41 | refs/heads/master | 2021-10-09T08:13:09.647460 | 2018-12-23T23:46:35 | 2018-12-23T23:46:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #!C:\Users\Eric\Python\Copa\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
847e269f9a9c6a38b6d1af2ea9b074571cd66b64 | a425842a51deab915fc4319b3226cef3f49e53ea | /build/extriPACK/intelligent_actuator/robo_cylinder/catkin_generated/pkg.installspace.context.pc.py | 25b485d848699b5198e6532286e6957b091976b9 | [] | no_license | Sinchiguano/Part-Localization-For-Robotic-Arm | 1458204e52f34354cbd0e8e1bff1dfaf6caefe1c | ebc1ed19da171ff4b5a52a3a031ae3049b0b9eb8 | refs/heads/master | 2021-10-08T19:49:53.455680 | 2018-12-16T20:03:04 | 2018-12-16T20:03:04 | 155,774,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/yumi_ws/install/include".split(';') if "/home/casch/yumi_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robo_cylinder"
PROJECT_SPACE_DIR = "/home/casch/yumi_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
d2211d3eaf542db3587fdce683a7c3b3881827a9 | bcf98d9adf6f0e44601d91c83453b01ad311071d | /listings/models.py | 6ccf1ecd4cff2f0209fb8a27b2f7a0ceed3d7559 | [] | no_license | GMNaim/Real-Estate-project | 89fa64cb5afeb344d43095da93bbcbf35b6b9121 | 93286edde1e5d34dcbbf3ca1695a5ef4e3577f64 | refs/heads/master | 2022-02-13T06:04:34.202090 | 2019-08-28T14:33:08 | 2019-08-28T14:33:08 | 192,858,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from django.db import models
from datetime import datetime
from realtors.models import RealtorInformation
class IndividualListInformation(models.Model):
realtor = models.ForeignKey(RealtorInformation, on_delete=models.DO_NOTHING)
title = models.CharField(max_length=250)
house_address = models.CharField(max_length=100, blank=True)
state = models.CharField(max_length=100)
zip_code = models.CharField(max_length=50)
city = models.CharField(max_length=100)
country = models.CharField(max_length=100)
description = models.TextField(blank=True)
price = models.IntegerField()
bedrooms = models.IntegerField()
bathrooms = models.DecimalField(max_digits=4, decimal_places=1)
garage = models.IntegerField(default=0)
square_feet = models.IntegerField()
lot_size = models.DecimalField(max_digits=5, decimal_places=1)
photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
is_published = models.BooleanField(default=True)
list_date = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self): # Main field to display
return self.title # Here title is the main field
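
# Hedged usage sketch (standard Django QuerySet API; not part of the original
# file):
#   IndividualListInformation.objects.filter(is_published=True).order_by('-list_date')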
| [
"[email protected]"
] | |
7f9ddb6f4ba36d244e11ae5efd596f4b35b5ff07 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/pylint/test/functional/misplaced_format_function.py | 4c3b8af018026bba7ccd1d78159bec027a34dd62 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-pylint-2.3.0-pmz72kdc34fnma6vo5sc2y3c5wp5sjeb/lib/python3.7/site-packages/pylint/test/functional/misplaced_format_function.py | [
"[email protected]"
] | |
8841420544c5a92b4429c203c368ca8f123180ab | 32e55bf28b9f22265bcbc1d8c0ebf52a3608187d | /12. Integer to Roman.py | 03281264063156d7c93642f5364115012353c373 | [] | no_license | Garacc/LeetCode | 9f843672a18701d032f36769c9025761199d8caf | 215d12703b2cac4c1ad49d5a0e1060948fbbacd2 | refs/heads/master | 2018-10-10T03:37:48.889898 | 2018-09-17T08:38:22 | 2018-09-17T08:38:22 | 120,304,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
#romandict = {1:'I', 5:'V', 10:'X', 50:'L', 100:'C', 500:'D', 1000:'M'}
        thou = num // 1000
        hun = (num % 1000) // 100
        ten = (num % 100) // 10
        one = num % 10
        output = ''
        output += thou * 'M'
        if hun == 9: output += 'CM'
        elif 5 <= hun <= 8: output += 'D' + (hun - 5) * 'C'
        elif hun == 4: output += 'CD'
        else: output += hun * 'C'
        if ten == 9: output += 'XC'
        elif 5 <= ten <= 8: output += 'L' + (ten - 5) * 'X'
        elif ten == 4: output += 'XL'
        else: output += ten * 'X'
        if one == 9: output += 'IX'
        elif 5 <= one <= 8: output += 'V' + (one - 5) * 'I'
        elif one == 4: output += 'IV'
        else: output += one * 'I'
return output | [
"[email protected]"
] | |
49b096d2e37b444b6167aab4b4c3ee32ff9c8f02 | 21e76f93747336bb649ec1906257b0dee66442d3 | /resources/lib/services/nfsession/nfsession_requests.py | 648a46b52e40073504ca231fd3706fe5f7f3ffd9 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | freedomhkg-tv/plugin.video.netflix | d55f1671b4fbf201e3cda34e6eea4347c3935ee6 | 30ac436ffd02389983df8610aee098eb0bc10b0c | refs/heads/master | 2021-01-02T17:39:28.005728 | 2020-02-09T19:50:18 | 2020-02-09T19:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,480 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Stateful Netflix session management: handle the http requests
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import time
import json
import requests
import resources.lib.common as common
import resources.lib.api.website as website
from resources.lib.globals import g
from resources.lib.services.nfsession.nfsession_base import NFSessionBase, needs_login
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.api.exceptions import (APIError, WebsiteParsingError,
InvalidMembershipStatusError)
BASE_URL = 'https://www.netflix.com'
"""str: Secure Netflix url"""
URLS = {
'login': {'endpoint': '/login', 'is_api_call': False},
'logout': {'endpoint': '/SignOut', 'is_api_call': False},
'shakti': {'endpoint': '/pathEvaluator', 'is_api_call': True},
'browse': {'endpoint': '/browse', 'is_api_call': False},
'profiles': {'endpoint': '/profiles/manage', 'is_api_call': False},
'switch_profile': {'endpoint': '/SwitchProfile', 'is_api_call': False},
'activate_profile': {'endpoint': '/profiles/switch', 'is_api_call': True},
'pin': {'endpoint': '/pin', 'is_api_call': False},
'pin_reset': {'endpoint': '/pin/reset', 'is_api_call': True},
'pin_service': {'endpoint': '/pin/service', 'is_api_call': True},
'metadata': {'endpoint': '/metadata', 'is_api_call': True},
'set_video_rating': {'endpoint': '/setVideoRating', 'is_api_call': True}, # Old rating system
'set_thumb_rating': {'endpoint': '/setThumbRating', 'is_api_call': True},
'update_my_list': {'endpoint': '/playlistop', 'is_api_call': True},
# Don't know what these could be used for. Keeping for reference
# 'video_list_ids': {'endpoint': '/preflight', 'is_api_call': True},
# 'kids': {'endpoint': '/Kids', 'is_api_call': False}
}
# List of all static endpoints for HTML/JSON POST/GET requests
# How many entries of a list will be fetched with one path request
class NFSessionRequests(NFSessionBase):
"""Handle the http requests"""
@common.addonsignals_return_call
@needs_login
def get(self, component, **kwargs):
"""Execute a GET request to the designated component's URL."""
return self._get(component, **kwargs)
@common.addonsignals_return_call
@needs_login
def post(self, component, **kwargs):
"""Execute a POST request to the designated component's URL."""
return self._post(component, **kwargs)
def _get(self, component, **kwargs):
return self._request_call(
method=self.session.get,
component=component,
**kwargs)
def _post(self, component, **kwargs):
return self._request_call(
method=self.session.post,
component=component,
**kwargs)
@common.time_execution(immediate=True)
def _request_call(self, method, component, **kwargs):
return self._request(method, component, None, **kwargs)
def _request(self, method, component, session_refreshed, **kwargs):
url = (_api_url(component)
if URLS[component]['is_api_call']
else _document_url(component))
common.debug('Executing {verb} request to {url}',
verb='GET' if method == self.session.get else 'POST', url=url)
data, headers, params = self._prepare_request_properties(component,
kwargs)
start = time.clock()
response = method(
url=url,
verify=self.verify_ssl,
headers=headers,
params=params,
data=data)
common.debug('Request took {}s', time.clock() - start)
common.debug('Request returned statuscode {}', response.status_code)
if response.status_code in [404, 401] and not session_refreshed:
# 404 - It may happen when Netflix update the build_identifier version and causes the api address to change
# 401 - It may happen when authURL is not more valid (Unauthorized for url)
# So let's try refreshing the session data (just once)
common.warn('Try refresh session data due to {} http error', response.status_code)
if self.try_refresh_session_data():
return self._request(method, component, True, **kwargs)
response.raise_for_status()
return (_raise_api_error(response.json() if response.content else {})
if URLS[component]['is_api_call']
else response.content)
def try_refresh_session_data(self, raise_exception=False):
"""Refresh session_data from the Netflix website"""
# pylint: disable=broad-except
try:
website.extract_session_data(self._get('profiles'))
self.update_session_data()
common.debug('Successfully refreshed session data')
return True
except InvalidMembershipStatusError:
raise
except WebsiteParsingError:
# it is possible that cookies may not work anymore,
# it should be due to updates in the website,
# this can happen when opening the addon while executing update_profiles_data
import traceback
common.warn('Failed to refresh session data, login expired (WebsiteParsingError)')
common.debug(traceback.format_exc())
self.session.cookies.clear()
return self._login()
except requests.exceptions.RequestException:
import traceback
common.warn('Failed to refresh session data, request error (RequestException)')
common.warn(traceback.format_exc())
if raise_exception:
raise
except Exception:
import traceback
common.warn('Failed to refresh session data, login expired (Exception)')
common.debug(traceback.format_exc())
self.session.cookies.clear()
if raise_exception:
raise
return False
def _login(self, modal_error_message=False):
raise NotImplementedError
def _prepare_request_properties(self, component, kwargs):
data = kwargs.get('data', {})
headers = kwargs.get('headers', {})
params = kwargs.get('params', {})
if component in ['set_video_rating', 'set_thumb_rating', 'update_my_list', 'pin_service']:
headers.update({
'Content-Type': 'application/json',
'Accept': 'application/json, text/javascript, */*'})
data['authURL'] = self.auth_url
data = json.dumps(data)
return data, headers, params
def _document_url(component):
return BASE_URL + URLS[component]['endpoint']
def _api_url(component):
return '{baseurl}{componenturl}'.format(
baseurl=g.LOCAL_DB.get_value('api_endpoint_url', table=TABLE_SESSION),
componenturl=URLS[component]['endpoint'])
def _raise_api_error(decoded_response):
if decoded_response.get('status', 'success') == 'error':
raise APIError(decoded_response.get('message'))
return decoded_response
| [
"[email protected]"
] | |
d702030522318f0d1aa5c9c134e670bf2dd23db5 | 10f091bf946bdd6b50c3fa0637504ab19d9c65c2 | /albums/3/challenge41_easy/code.py | ad0dfe1fae293f17ce7bc56dcbb430c51c67f770 | [] | no_license | Rich43/rog | ccebee00b982579c46c30a7dab55b4dbe6396fdc | 029dd57c920aa869750b809d22092c9614e67ba9 | refs/heads/master | 2023-01-23T07:07:16.069821 | 2023-01-19T19:10:43 | 2023-01-19T19:10:43 | 109,163,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | ''' Write a program that will accept a sentence as input and then output that sentence surrounded by some type of an ASCII decoratoin banner.
Sample run:
Enter a sentence: So long and thanks for all the fish
Output
*****************************************
* *
* So long and thanks for all the fish *
* *
*****************************************
Bonus: If the sentence is too long, move words to the next line.
'''
def outer():
global leng
return ('x' * (leng +6))
def inner():
global leng
return ('x' + (' ' * (leng + 4)) + 'x')
def string():
global quote
return ('x' + ' ' * 2 + quote + ' ' * 2 + 'x')
if __name__ == '__main__':
#quote = input("Let's have a quote...: ")
quote = 'I am a python'
leng = len(quote)
out = outer()
inn = inner()
txt = string()
print(out + "\n" + inn + "\n" + txt + "\n" + inn + "\n" + out) | [
"[email protected]"
] | |
fd1757cbaefda4ceaf1aa771e045b08774c21f1c | b7086d5e907aaf983af5b8d7d6f74c4fc6e40f23 | /RA5/Skimmer/HLTSkimmer.py | eee2d8bf3b15d8d4b70614a251575da60d72ad3c | [] | no_license | ahmad3213/PyNTupleProcessor | da40c596f275406f21e83e117c5b8020d6ee309c | c84fa597b132e91342226b12a74213f675c0b125 | refs/heads/master | 2023-03-29T18:59:19.454585 | 2020-09-21T21:52:25 | 2020-09-21T21:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | from Core.Module import Module
class HLTSkimmer(Module):
def __init__(self,name,emulation=False,cutflow="SR"):
super(HLTSkimmer,self).__init__(name)
self.emulation = emulation
self.cutflow = cutflow
def return_sr_trigger(self,event):
if self.emulation or (self.dataset.isData and "2016" in self.dataset.parent.name):
notRunH = ("2016H" not in self.dataset.parent.name and self.dataset.isData) or self.dataset.isMC
if event.htJet40[0] < 300.:
if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
return event.HLT_BIT_HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v[0]
elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
if notRunH:
return event.HLT_BIT_HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v[0] or event.HLT_BIT_HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_v[0]
else:
return event.HLT_BIT_HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v[0] or event.HLT_BIT_HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v[0]
elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
if notRunH:
return event.HLT_BIT_HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_v[0] or event.HLT_BIT_HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_v[0]
else:
return event.HLT_BIT_HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_DZ_v[0] or event.HLT_BIT_HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v[0]
else:
if notRunH:
if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
return event.HLT_BIT_HLT_DoubleMu8_Mass8_PFHT300_v[0]
elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
return event.HLT_BIT_HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
return event.HLT_BIT_HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
else:
passTrig = False
if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
passTrig = event.HLT_BIT_HLT_DoubleMu8_Mass8_PFHT300_v[0]
elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
passTrig = event.HLT_BIT_HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
passTrig = event.HLT_BIT_HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
if not passTrig and self.dataset.isData:
return event.HLT_BIT_HLT_PFJet450_v[0]
else:
return passTrig
else:
raise RuntimeError,"Data other than 2016 are not supported atm"
def analyze(self,event):
if self.dataset.isMC and not self.emulation: return True
if not hasattr(event,"firstLep") or not hasattr(event,"secondLep"):
event.tightLeps.sort(key=lambda x: x.pt,reverse=True)
firstLep = event.tightLeps[0]
for l in event.tightLeps[1:]:
if l.charge*event.tightLeps[0].charge > 0.:
secondLep = l
event.firstLep = firstLep
event.secondLep = secondLep
if self.cutflow == "SR":
return self.return_sr_trigger(event)
elif self.cutflow == "TightLoose":
#return self.return_tl_trigger(event)
return self.return_sr_trigger(event)
else:
raise RuntimeError,"cutflow other than SR and TightLoose are not supported atm"
| [
"[email protected]"
] | |
60168d852d76649dbac368f415e7030b85d364e9 | f134679dc39849cc741f5d8aaa63793d7c9f9b7d | /testapi/urls.py | 36438b62909899e93887156062e759da560ad8a0 | [] | no_license | namraht/trial | 792d7c7a427c463ab62b9675e745a7d537e3483c | 3283ee39fcda03f5a1b1a04f3a4939d32ed40ac0 | refs/heads/master | 2020-12-04T01:04:27.056179 | 2016-09-07T21:38:14 | 2016-09-07T21:38:14 | 67,645,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | """testapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from rest_framework.urlpatterns import format_suffix_patterns
from UserInfo import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^users/',views.UsersList.as_view()),
]
urlpatterns=format_suffix_patterns(urlpatterns) | [
"admin"
] | admin |
c160e52892a736da04cfcce881b61a37f4b39b87 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础06-面向对象1/2-创建一个对象.py | 0b4e1cd81068687f7f9621c83d7dba09708014b8 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 174 | py | class Cat:
#属性
#方法
def eat(self):
print("猫在吃鱼...")
def drink(self):
print("猫在喝水...")
#创建一个对象
tom = Cat()
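# call the new object's methods (small usage demo added for clarity)
tom.eat()
tom.drink()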
| [
"[email protected]"
] | |
30f21d83dc43f9b8236344d296befd0755ec54a7 | a657cea31cd9f0f69825a458530afac1e0391122 | /examples/GAN/Image2Image.py | 196f7743312bbad6da9d50ee706bb74588fef2e5 | [
"Apache-2.0"
] | permissive | liaoheping/tensorpack | 5f7dd337b6ec22cf7f27a2ee35a4a0c6a602abfa | e04bb2d59ec63f1b6549b485af1e1946376f723d | refs/heads/master | 2021-01-12T05:27:03.291185 | 2017-01-03T14:19:19 | 2017-01-03T14:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,240 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Image2Image.py
# Author: Yuxin Wu <[email protected]>
import numpy as np
import tensorflow as tf
import glob
import pickle
import os
import sys
import argparse
import cv2
from tensorpack import *
from tensorpack.utils.viz import *
from tensorpack.tfutils.summary import add_moving_summary, summary_moving_average
import tensorpack.tfutils.symbolic_functions as symbf
from GAN import GANTrainer, build_GAN_losses
"""
To train:
./Image2Image.py --data /path/to/datadir --mode {AtoB,BtoA}
# datadir should contain jpg images of shpae 2s x s, formed by A and B
# you can download some data from the original authors:
# https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/
# training visualization will appear be in tensorboard
Speed:
On GTX1080 with BATCH=1, the speed is about 9.3it/s (the original torch version is 9.5it/s)
To visualize on test set:
./Image2Image.py --sample --data /path/to/test/datadir --mode {AtoB,BtoA} --load model
"""
SHAPE = 256
BATCH = 1
IN_CH = 3
OUT_CH = 3
LAMBDA = 100
NF = 64 # number of filter
class Model(ModelDesc):
def _get_input_vars(self):
return [InputVar(tf.float32, (None, SHAPE, SHAPE, IN_CH), 'input'),
InputVar(tf.float32, (None, SHAPE, SHAPE, OUT_CH), 'output')]
def generator(self, imgs):
# imgs: input: 256x256xch
# U-Net structure, it's slightly different from the original on the location of relu/lrelu
with argscope(BatchNorm, use_local_stat=True), \
argscope(Dropout, is_training=True):
# always use local stat for BN, and apply dropout even in testing
with argscope(Conv2D, kernel_shape=4, stride=2,
nl=lambda x, name: LeakyReLU(BatchNorm('bn', x), name=name)):
e1 = Conv2D('conv1', imgs, NF, nl=LeakyReLU)
e2 = Conv2D('conv2', e1, NF * 2)
e3 = Conv2D('conv3', e2, NF * 4)
e4 = Conv2D('conv4', e3, NF * 8)
e5 = Conv2D('conv5', e4, NF * 8)
e6 = Conv2D('conv6', e5, NF * 8)
e7 = Conv2D('conv7', e6, NF * 8)
e8 = Conv2D('conv8', e7, NF * 8, nl=BNReLU) # 1x1
with argscope(Deconv2D, nl=BNReLU, kernel_shape=4, stride=2):
return (LinearWrap(e8)
.Deconv2D('deconv1', NF * 8)
.Dropout()
.ConcatWith(3, e7)
.Deconv2D('deconv2', NF * 8)
.Dropout()
.ConcatWith(3, e6)
.Deconv2D('deconv3', NF * 8)
.Dropout()
.ConcatWith(3, e5)
.Deconv2D('deconv4', NF * 8)
.ConcatWith(3, e4)
.Deconv2D('deconv5', NF * 4)
.ConcatWith(3, e3)
.Deconv2D('deconv6', NF * 2)
.ConcatWith(3, e2)
.Deconv2D('deconv7', NF * 1)
.ConcatWith(3, e1)
.Deconv2D('deconv8', OUT_CH, nl=tf.tanh)())
def discriminator(self, inputs, outputs):
""" return a (b, 1) logits"""
l = tf.concat(3, [inputs, outputs])
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(l)
.Conv2D('conv0', NF, nl=LeakyReLU)
.Conv2D('conv1', NF * 2)
.BatchNorm('bn1').LeakyReLU()
.Conv2D('conv2', NF * 4)
.BatchNorm('bn2').LeakyReLU()
.Conv2D('conv3', NF * 8, stride=1, padding='VALID')
.BatchNorm('bn3').LeakyReLU()
.Conv2D('convlast', 1, stride=1, padding='VALID')())
return l
def _build_graph(self, input_vars):
input, output = input_vars
input, output = input / 128.0 - 1, output / 128.0 - 1
with argscope([Conv2D, Deconv2D],
W_init=tf.truncated_normal_initializer(stddev=0.02)), \
argscope(LeakyReLU, alpha=0.2):
with tf.variable_scope('gen'):
fake_output = self.generator(input)
with tf.variable_scope('discrim'):
real_pred = self.discriminator(input, output)
with tf.variable_scope('discrim', reuse=True):
fake_pred = self.discriminator(input, fake_output)
self.g_loss, self.d_loss = build_GAN_losses(real_pred, fake_pred)
errL1 = tf.reduce_mean(tf.abs(fake_output - output), name='L1_loss')
self.g_loss = tf.add(self.g_loss, LAMBDA * errL1, name='total_g_loss')
add_moving_summary(errL1, self.g_loss)
# tensorboard visualization
if IN_CH == 1:
input = tf.image.grayscale_to_rgb(input)
if OUT_CH == 1:
output = tf.image.grayscale_to_rgb(output)
fake_output = tf.image.grayscale_to_rgb(fake_output)
viz = (tf.concat(2, [input, output, fake_output]) + 1.0) * 128.0
viz = tf.cast(tf.clip_by_value(viz, 0, 255), tf.uint8, name='viz')
tf.summary.image('input,output,fake', viz, max_outputs=max(30, BATCH))
all_vars = tf.trainable_variables()
self.g_vars = [v for v in all_vars if v.name.startswith('gen/')]
self.d_vars = [v for v in all_vars if v.name.startswith('discrim/')]
def split_input(img):
"""
img: an image with shape (s, 2s, 3)
:return: [input, output]
"""
s = img.shape[0]
input, output = img[:, :s, :], img[:, s:, :]
if args.mode == 'BtoA':
input, output = output, input
if IN_CH == 1:
input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
if OUT_CH == 1:
output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
return [input, output]
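
# Quick shape sanity check (hedged sketch, commented out; `img` is a
# placeholder array, not from the original file):
# img = np.zeros((256, 512, 3)); a, b = split_input(img)
# assert a.shape[:2] == b.shape[:2] == (256, 256)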
def get_data():
datadir = args.data
# assume each image is 512x256 split to left and right
imgs = glob.glob(os.path.join(datadir, '*.jpg'))
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = MapData(ds, lambda dp: split_input(dp[0]))
augs = [imgaug.Resize(286), imgaug.RandomCrop(256)]
ds = AugmentImageComponents(ds, augs, (0, 1))
ds = BatchData(ds, BATCH)
ds = PrefetchDataZMQ(ds, 1)
return ds
def get_config():
logger.auto_set_dir()
dataset = get_data()
lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
return TrainConfig(
dataset=dataset,
optimizer=tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3),
callbacks=Callbacks([
StatPrinter(), PeriodicCallback(ModelSaver(), 3),
ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
]),
model=Model(),
step_per_epoch=dataset.size(),
max_epoch=300,
)
def sample(datadir, model_path):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=Model(),
input_names=['input', 'output'],
output_names=['viz'])
imgs = glob.glob(os.path.join(datadir, '*.jpg'))
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = BatchData(MapData(ds, lambda dp: split_input(dp[0])), 6)
pred = SimpleDatasetPredictor(pred, ds)
for o in pred.get_result():
o = o[0][:, :, :, ::-1]
next(build_patch_list(o, nr_row=3, nr_col=2, viz=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='run sampling')
parser.add_argument('--data', help='Image directory')
parser.add_argument('--mode', choices=['AtoB', 'BtoA'], default='AtoB')
global args
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
assert args.data
if args.sample:
sample(args.data, args.load)
else:
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
GANTrainer(config).train()
| [
"[email protected]"
] | |
f847516b8340b2693429d43db570e49faaff7d04 | f3399d1ab1849b267cc83de30044dfe556598262 | /src/main.py | 705fe0947eac4f2daadef39724ef1951b6d1d92f | [] | no_license | JiayuHeUSYD/DARNN_Multi_GPU | 0114c263c026ca39e52fb2f47ed7031204a0d966 | 7e608237a0a7f8165e0e62d9ac50671346e7979b | refs/heads/master | 2020-09-07T09:29:02.305668 | 2019-01-30T05:12:30 | 2019-01-30T05:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,562 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import argparse
import numpy as np
from tensorboardX import SummaryWriter
from DARNN import Encoder, Decoder
from CsiDataSet import CSI300Dataset
from VerParams import Version
def set_seed(seed=1):
'''
https://github.com/pytorch/pytorch/issues/11278
https://github.com/pytorch/pytorch/issues/11278
https://github.com/pytorch/pytorch/issues/12207
'''
import random
import os
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
# Determinism Seed
set_seed()
from torch import nn
from torch import optim
# Parameters settings
parser = argparse.ArgumentParser(description="DA-RNN")
# Dataset setting
parser.add_argument(
'--norm_csi_dir',
type=str,
default='/project/chli/scp/CSI300_NORM/',
help='normalized csi300 csv dir')
parser.add_argument(
'--num_workers',
type=int,
default=12,
help='number of data loading workers (default 3)')
parser.add_argument(
'--dataset_split_ratio',
default=[0.8, 0.1, 0.1],
type=list,
help='train, valid, test dataset split ratio')
parser.add_argument(
'--x_columns',
default=['o', 'h', 'l', 'v', 'a'],
type=list,
help='list of features\' (X) column names')
parser.add_argument(
'--y_columns',
default=['c'],
type=list,
help='list of target (Y) column names')
parser.add_argument(
'--pin_memory', type=bool, default=True, help='pin memory page')
parser.add_argument(
'--debug', type=bool, default=False, help='debug with small data')
# Encoder / Decoder parameters setting
parser.add_argument(
'--hid_dim_encoder',
type=int,
default=32,
help='size of hidden states for the encoder m [64, 128]')
parser.add_argument(
'--hid_dim_decoder',
type=int,
default=32,
help='size of hidden states for the decoder p [64, 128]')
parser.add_argument(
'--ind_steps',
type=int,
default=0,
help='window length for computing indicator')
parser.add_argument(
'--lag_steps',
type=int,
default=20,
help='the number of lag time steps (history window length T)')
parser.add_argument(
'--pred_steps',
type=int,
default=1,
help='y_{t+pred_steps} = p(y_t,...,y_{timesteps-1}, x_t,...,x_{timesteps-1})'
)
# Training parameters setting
parser.add_argument(
'--param_version', type=int, default=None, help='int versioning params')
parser.add_argument(
'--epochs',
type=int,
default=10,
help='number of epochs to train [10, 200, 500]')
parser.add_argument(
'--lr',
type=float,
default=0.001,
help='learning rate [0.001] reduced by 0.1 after each 10000 iterations')
parser.add_argument('--seed', default=1, type=int, help='manual seed')
parser.add_argument(
'--batchsize', type=int, default=512, help='input batch size [128]')
parser.add_argument('--shuffle', type=bool, default=True, help='shuffle batch')
parser.add_argument(
'--task_type', default='single', type=str, help='single or multi')
parser.add_argument(
'--pred_type', default='shift', type=str, help='steps or shift')
# debug
parse_cli = False
opt = parser.parse_args('')
if parse_cli:
opt = parser.parse_args()
if __name__ == "__main__":
# debug
# from importlib import reload
opt.debug = False
opt.num_workers = 20
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
ver = Version()
ver.set_ver_opt(opt.param_version, opt)
suffix = 'L%dP%dHdim%d' % (opt.lag_steps, opt.pred_steps, opt.hid_dim_encoder)
writer = SummaryWriter(comment=suffix)
csi300 = CSI300Dataset()
train_dataset, valid_dataset, test_dataset, \
train_loader, valid_loader, test_loader = csi300.get_dataset_loader(
opt)
feat_dim = 13
encoder = Encoder(opt.lag_steps, feat_dim, opt.hid_dim_encoder)
decoder = Decoder(opt.lag_steps, opt.hid_dim_encoder, opt.hid_dim_decoder)
# device = ('cpu')
# Multi-GPU Support
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
encoder = nn.DataParallel(encoder)
decoder = nn.DataParallel(decoder)
encoder.to(device)
decoder.to(device)
criterion = nn.MSELoss()
encoder_optimizer = optim.Adam(encoder.parameters(), lr=opt.lr)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=opt.lr)
# Train Loops
n_batches_count = 1
epoch_batch_loss_list = list()
for epoch in range(opt.epochs):
batch_loss_list = list()
for data_dict in train_loader:
# Prepare Data On Devices
X = data_dict['X'].type(torch.FloatTensor).to(device)
Y = data_dict['Y'].type(torch.FloatTensor).squeeze().to(device)
Ygt = data_dict['Y_gt'].type(torch.FloatTensor).to(device)
# Forward Pass
H = encoder(X)
Ypred = decoder(H, Y)
loss = criterion(Ypred.squeeze(), Ygt)
# Gradient Descent
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
      # Log Stats
      batch_loss_list.append(loss.item())
      if n_batches_count % 100 == 0:
        writer.add_scalar('train/loss', loss.item(), n_batches_count)
if n_batches_count % 50000 == 0:
for p in encoder_optimizer.param_groups:
p['lr'] *= 0.9
for p in decoder_optimizer.param_groups:
p['lr'] *= 0.9
n_batches_count += 1
    print('epoch %d mean train loss: %f' % (epoch, sum(batch_loss_list) / max(len(batch_loss_list), 1)))
epoch_batch_loss_list.append(batch_loss_list)
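
    # Hedged per-epoch validation sketch (added; not in the original loop).
    # Teacher forcing is kept in decoder(H, Y), so this is only a rough
    # generalization check rather than a true multi-step evaluation.
    with torch.no_grad():
      valid_losses = []
      for data_dict in valid_loader:
        X = data_dict['X'].type(torch.FloatTensor).to(device)
        Y = data_dict['Y'].type(torch.FloatTensor).squeeze().to(device)
        Ygt = data_dict['Y_gt'].type(torch.FloatTensor).to(device)
        valid_losses.append(criterion(decoder(encoder(X), Y).squeeze(), Ygt).item())
      if valid_losses:
        writer.add_scalar('valid/loss', sum(valid_losses) / len(valid_losses), n_batches_count)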
| [
"[email protected]"
] | |
7b8b2c10c5c88c5070533d1be5d7e8280fd94ed0 | 3d060dd745ac19e58255843d496d6afe7168abe2 | /work_for_aca_lsq/make_7122_intensities.py | 5b203f1eb526a63b22446584054feab32121456f | [] | no_license | nksauter/LS49 | 352e96e3601d2475f7f81e0c6a7e4771e9cf9911 | e660c7395e3e3349d43ccd6e59cc099042c5c512 | refs/heads/master | 2023-05-27T01:50:34.996331 | 2023-05-15T22:09:56 | 2023-05-15T22:09:56 | 113,079,929 | 8 | 9 | null | 2023-05-10T18:37:14 | 2017-12-04T18:34:22 | Python | UTF-8 | Python | false | false | 1,552 | py | from __future__ import print_function
from __future__ import division
from six.moves import cPickle as pickle
from six.moves import range
if __name__=="__main__":
from LS49.sim.util_fmodel import gen_fmodel
from LS49.sim.step5_pad import pdb_lines,Fe_oxidized_model,Fe_reduced_model
W2 = 12398.425/7122.
GF = gen_fmodel(resolution=1.9,pdb_text=pdb_lines,algorithm="fft",wavelength=W2)
GF.set_k_sol(0.435)
GF.reset_wavelength(W2)
GF.reset_specific_at_wavelength(label_has="FE1",tables=Fe_oxidized_model,newvalue=W2)
GF.reset_specific_at_wavelength(label_has="FE2",tables=Fe_reduced_model,newvalue=W2)
W2_reduced = GF.get_intensities()
# Einsle paper: Reduced form has
# buried irons, FE1, in Fe(III) state (absorption at higher energy, oxidized)
# surface iron, FE2, in Fe(II) state (absorption at lower energy, reduced)
W2i = W2_reduced.indices()
with (open("debug26.data","w")) as F:
for iw in range(len(W2i)):
print ("%20s, %10.2f"%(W2_reduced.indices()[iw],W2_reduced.data()[iw]), file=F)
intensity_dict = {}
for iw in range(len(W2i)):
intensity_dict[W2_reduced.indices()[iw]] = W2_reduced.data()[iw]
with (open("debug26_intensities.pickle","wb")) as F:
pickle.dump(intensity_dict, F, pickle.HIGHEST_PROTOCOL)
with (open("sfall_7122_amplitudes.pickle","wb")) as F:
pickle.dump(GF.get_amplitudes(), F, pickle.HIGHEST_PROTOCOL)
GF.make_P1_primitive()
with (open("sfall_P1_7122_amplitudes.pickle","wb")) as F:
pickle.dump(GF.get_amplitudes(), F, pickle.HIGHEST_PROTOCOL)
| [
"[email protected]"
] | |
f77db9cc87b1c554b9314bab2c7e4a9c2b3e15af | a332b34158b0b7e6ee04b51c3de54f220a5263ef | /virtual/bin/django-admin.py | eb3dee42caf8171d5fd70aa71c39a49b83b58d4d | [] | no_license | IngabireTina/Ing_api | 41eef33b3df0c289d05c247fc1065295274eda59 | b482b5e97b916063d5a6926ec927246e1f2d276c | refs/heads/master | 2023-04-11T07:46:10.021649 | 2021-04-26T15:05:53 | 2021-04-26T15:05:53 | 361,655,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/home/tina/Desktop/ing_api/virtual/bin/python3.6
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
] | |
b0234fee86193c0f241ac55eb21416301a933d0c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02785/s477774791.py | 60aa3b22a33800e8d7a30bb8d0694975657ee378 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | N, K = map(int, input().split())
H = list(map(int, input().split()))
H = sorted(H)
hp = 0
for i in range(N-K):
hp += H[i]
print(hp) | [
"[email protected]"
] | |
5a9ba52649275f8cb827ccb6185ac2b1cf2f8f62 | 4e0f2938b003f5d68a57f213e652fbffb2f72ba2 | /FishStat_M.py | 4cb6b0e27eeb6eb548d7e915ecb695342689ce9f | [] | no_license | adcGG/Lianxi | e4b1ce0d3cfc76e625e1e1caca0a58f25ba5d692 | 3659c3ca11a13b4ad54dbd2e669949701bae10b5 | refs/heads/master | 2022-12-13T05:45:41.312292 | 2019-08-14T07:38:19 | 2019-08-14T07:38:19 | 201,189,540 | 0 | 1 | null | 2022-04-22T22:08:16 | 2019-08-08T06:07:53 | Python | UTF-8 | Python | false | false | 1,483 | py | def day_stat(day,fishs):
'''
:param day: 为字符串参数
:param fishs: 为两层嵌套字典参数
:return:
'''
nums = 0
amount = 0
for name0,sub_records in fishs.items():
print('%s 数量 %d 单价%.2f元'%(name0,sub_records[0],sub_records[1]))
nums+=sub_records[0]
amount += sub_records[0]*sub_records[1]
print('%s 数量小计%d金额小计%.2f'%(day,nums,amount))
def allday_stat(fish,maxs):
    '''
    Aggregate all fish across all days and store them in a stat dict.
    :param fish: nested dict {day: {fish_name: [count, unit_price]}}
    :return:
    '''
name1 = ""
sub_record = {}
stat_record = {}
for day,day_record in fish.items():
for name1,sub_record in day_record.items():
if name1 in stat_record:
stat_record[name1][0]+=sub_record[0]
stat_record[name1][1] += sub_record[0]*sub_record[1]
else:
stat_record[name1] = [sub_record[0],sub_record[0]*sub_record[1]]
for name1,nums in stat_record.items():
if maxs[1] < nums[0]:
maxs[0] = name1
maxs[1] = nums[0]
if maxs[3] <nums[1]:
maxs[2] = name1
maxs[3] = nums[1]
maxs[4] = maxs[4]+nums[0]
maxs[5] = maxs[5]+nums[1]
return stat_record
def PrintMaxValues(maxstat1):
    '''
    Print the maxima.
    :param maxstat1: slots [0:4] record the max-count and max-amount fish; [4] records the total count; [5] records the total amount
    :return:
    '''
    print('Fish with the largest count: %s, %d caught'%(maxstat1[0],maxstat1[1]))
    print('Fish with the largest amount: %s, %.2f yuan'%(maxstat1[2],maxstat1[3]))
    print('Total caught: %d fish, total amount: %.2f yuan'%(maxstat1[4],maxstat1[5]))
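
# Hedged demo (added; sample data shaped to match the functions above):
if __name__ == '__main__':
    records = {
        'Mon': {'carp': [3, 10.5], 'bass': [1, 22.0]},
        'Tue': {'carp': [2, 10.5]},
    }
    for day, day_record in records.items():
        day_stat(day, day_record)
    maxs = ['', 0, '', 0.0, 0, 0.0]
    allday_stat(records, maxs)
    PrintMaxValues(maxs)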
| [
"[email protected]"
] | |
ff67401d63b733d6006d89cf2cc4e7cc50d61f84 | 77f537f24d7d1cc2657c581de887e5a4da4c1754 | /python/rapidstream/BE/AnchorPlacement/PairwiseAnchorPlacementForSLRCrossing.py | 995a45e4c269af40afdf71fb7ec1d4c2b6b09818 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | UCLA-VAST/RapidStream | d5e4c75b666693964138d96ffb6bdf4d682cf0e9 | 106ae4055f72b1d59245e0650d7b838d8abdfb78 | refs/heads/main | 2023-04-14T20:40:01.332134 | 2022-12-20T03:01:25 | 2022-12-20T03:01:25 | 321,000,047 | 27 | 7 | MIT | 2022-06-02T04:01:12 | 2020-12-13T06:32:09 | Python | UTF-8 | Python | false | false | 14,359 | py | import itertools
import logging
import json
import operator
import time
from collections import defaultdict
from typing import List, Tuple, Dict
from mip import Model, minimize, CONTINUOUS, xsum, OptimizationStatus
from rapidstream.BE.Utilities import isPairSLRCrossing
from rapidstream.BE.Device.U250 import idx_of_left_side_slice_of_laguna_column
from autobridge.Device.DeviceManager import DeviceU250
from autobridge.Opt.Slot import Slot
U250_inst = DeviceU250()
slice_to_laguna = {idx_of_left_side_slice_of_laguna_column[i] : i \
for i in range(len(idx_of_left_side_slice_of_laguna_column))}
class SLLChannel:
"""
each SLLChannel consists of 24 SLL wires
The bottom/top of those SLL wires are the same with reference to SLICE coordinates
each channel will correspond to 8 laguna sites, each with 6 RX registers
if an anchor is upward, it must be placed on the RX at the top side
otherwise it must be placed on the RX at the bottom side
"""
def __init__(self, bottom_coor_y, i_th_column: int):
self.bottom_coor_x = idx_of_left_side_slice_of_laguna_column[i_th_column]
self.bottom_coor_y = bottom_coor_y
self.top_coor_x = self.bottom_coor_x
self.top_coor_y = bottom_coor_y + 60
self.capacity = 20
self.bottom_slot_y_min = int(bottom_coor_y / 120) * 120
self.bottom_slot_y_max = self.bottom_slot_y_min + 119
self._initRXList(i_th_column, bottom_coor_y)
def __hash__(self):
return hash((self.bottom_coor_x, self.bottom_coor_y))
def __str__(self):
return self.getString()
def getString(self):
return f'X{self.bottom_coor_x}Y{self.bottom_coor_y} <-> X{self.top_coor_x}Y{self.top_coor_y}'
def _initRXList(self, i_th_column, bottom_coor_y):
"""
get the laguna RX registers associated with this channel
"""
bottom_laguna_sites = [
f'LAGUNA_X{x}Y{y}' for x in (i_th_column*2, i_th_column*2+1) \
for y in self._get_nearest_laguna_y(bottom_coor_y) ]
top_laguna_sites = [
f'LAGUNA_X{x}Y{y}' for x in (i_th_column*2, i_th_column*2+1) \
for y in self._get_nearest_laguna_y(bottom_coor_y + 60) ]
# each laguna site has 6 RX registers
self.bottom_laguna_RX = [f'{site}/RX_REG{i}' for i in range(6) for site in bottom_laguna_sites]
self.top_laguna_RX = [f'{site}/RX_REG{i}' for i in range(6) for site in top_laguna_sites]
def _get_nearest_laguna_y(self, slice_y):
"""
convert from SLICE coordinate to laguna coordinate
"""
if 180 <= slice_y <= 299:
laguna_y = (slice_y - 180) * 2 + 120
elif 420 <= slice_y <= 539:
laguna_y = (slice_y - 420) * 2 + 360
elif 660 <= slice_y <= 779:
laguna_y = (slice_y - 660) * 2 + 600
else:
assert False
return (laguna_y, laguna_y+1)
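
  # Worked examples of the mapping above (derived from the arithmetic):
  # slice_y=180 -> laguna rows (120, 121); slice_y=299 -> (358, 359);
  # slice_y=420 -> (360, 361); slice_y=660 -> (600, 601).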
def getCostForAnchor(self, list_of_cell_property_dict: List[Dict], anchor_direction: str) -> float:
"""
the cost for placing an anchor on this channel
"""
SLR_crossing_penalty = 10
SLL_length = 60
lut_penalty = lambda num_lut_on_path : 1 + 0.3 * num_lut_on_path
def getDistFromCells(list_of_cell_property_dict: List[Dict]) -> List[int]:
"""
Distance between the RX of the SLL and the end cell.
If the connection goes up, the dist is between the end cells and the top_coor
Else the dist is between the end cells and the bottom_coor
"""
dists = []
# for loc, type in coor_to_cell_types.items():
for cell_property_dict in list_of_cell_property_dict:
loc = cell_property_dict["normalized_coordinate"]
x, y = loc[0], loc[1]
# determine if the cell is at the top side or bottom side
is_cell_at_bottom = self.bottom_slot_y_min <= y <= self.bottom_slot_y_max
if anchor_direction == 'DOWN':
if is_cell_at_bottom:
orig_dist = abs(x - self.bottom_coor_x) + abs(y - self.bottom_coor_y)
else:
# if a connection goes down, the end cell at the top will connect to
# the input of the SLL at the top, then travel through SLL to the RX at the bottom
orig_dist = SLR_crossing_penalty + SLL_length + abs(x - self.top_coor_x) + abs(y - self.top_coor_y)
elif anchor_direction == 'UP':
if is_cell_at_bottom:
# if a connection goes up, the end cell at the bottom will connect to
# the input of the SLL at the bottom, then travel through SLL to the RX at the top
orig_dist = SLR_crossing_penalty + SLL_length + abs(x - self.bottom_coor_x) + abs(y - self.bottom_coor_y)
else:
orig_dist = abs(x - self.top_coor_x) + abs(y - self.top_coor_y)
else:
assert False
# penaltize wires to LUTs
dists.append(orig_dist * lut_penalty(cell_property_dict["num_lut_on_path"]))
return dists
dists = getDistFromCells(list_of_cell_property_dict)
# avg wire length
dist_score = sum(dists) / len(dists)
unbalance_penalty = max(dists) - min(dists)
# prevent extremely short wires
hold_penalty = max(0, 10 - min(dists))
return dist_score + unbalance_penalty + hold_penalty
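
  # Worked example (illustrative numbers): dists = [12, 30] gives
  # dist_score = 21, unbalance_penalty = 18, hold_penalty = max(0, 10-12) = 0,
  # so the channel cost is 39.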
def placeAnchor(self, anchor_dir):
"""
        mark an RX register as occupied by popping it out.
        Sites at the top hand out RX registers from large index to small index
        (pop from the end of the list); sites at the bottom hand them out from
        small index to large index (pop from the front).
        Note that each SLL is associated with two RX and two TX registers
        so that it can be used in either direction, but only one of the two
        may actually be used.
"""
if anchor_dir == 'UP':
return self.top_laguna_RX.pop()
elif anchor_dir == 'DOWN':
return self.bottom_laguna_RX.pop(0)
else:
assert False, anchor_dir
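    # Usage sketch (illustrative): once the ILP assigns an anchor to this
    # channel, the flow calls e.g. channel.placeAnchor('UP') and receives a
    # site string such as 'LAGUNA_X5Y161/RX_REG5'. Every call permanently
    # consumes one RX register; the ILP capacity constraint (capacity = 20)
    # keeps at most 20 of the 24 physical registers per side in use.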
def _get_anchor_2_sll_dir(hub, slot1_name, slot2_name, anchor_connections: Dict[str, List[Dict[str, str]]]) -> Dict[str, str]:
"""
    each anchor will use one SLL connection.
    Determine the direction, upward or downward, in which each SLL will be used.
"""
slot1 = Slot(U250_inst, slot1_name)
slot2 = Slot(U250_inst, slot2_name)
up_slot = slot1 if slot1.down_left_y > slot2.down_left_y else slot2
# get the downward IO of the upper slot
up_slot_io = hub['PathPlanningWire'][up_slot.getRTLModuleName()]['DOWN']
# double check that the information in the hub is consistent
all_io = hub['SlotIO'][up_slot.getRTLModuleName()]
io_from_all_directions = list(itertools.chain.from_iterable(hub['PathPlanningWire'][up_slot.getRTLModuleName()].values()))
if not len(all_io) == len(io_from_all_directions) + 1: # +1 because of ap_clk
name_all_io = [io[-1] for io in all_io]
name_io_from_all_directions = [io[-1] for io in io_from_all_directions]
diff_list = set(name_all_io) - set(name_io_from_all_directions)
# the only difference should be top-level IOs
assert all('_axi_' in d or 'clk' in d or 'interrupt' == d or 'ap_rst_n' == d for d in diff_list), diff_list
# the output wire of the upper slot will travel DOWN the sll
get_sll_dir = lambda in_or_out : 'DOWN' if in_or_out == 'output' else 'UP'
slot_io_2_sll_dir = {io[-1] : get_sll_dir(io[0]) for io in up_slot_io}
anchor_2_sll_dir = {}
for anchor in anchor_connections.keys():
hls_var_name = anchor.split('_q0_reg')[0]
anchor_2_sll_dir[anchor] = slot_io_2_sll_dir[hls_var_name]
return anchor_2_sll_dir
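# Illustrative example (wire names assumed, not from the original source): if
# the upper slot drives an output wire 'fifo_data' across its DOWN boundary,
# then slot_io_2_sll_dir['fifo_data'] == 'DOWN', and an anchor register named
# 'fifo_data_q0_reg[3]' is mapped to 'DOWN' via the '_q0_reg' split above.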
def getSLLChannelToAnchorCost(
sll_channel_list: List[SLLChannel],
anchor_connections: Dict[str, List[Dict]],
anchor_to_sll_dir: Dict[str, str]):
"""
    We need to assign a score for placing an anchor in a bin.
    To prevent hold violations, we neglect the length of the SLL. Thus the distance will be
    (1) the source cell to the input of the SLL, plus
    (2) the output of the SLL to the destination cells.
    return: SLL channel -> anchor -> score
"""
sll_to_anchor_to_cost = {}
for sll_channel in sll_channel_list:
anchor_to_cost = {anchor : sll_channel.getCostForAnchor(list_of_cell_property_dict, anchor_to_sll_dir[anchor]) \
for anchor, list_of_cell_property_dict in anchor_connections.items()}
sll_to_anchor_to_cost[sll_channel] = anchor_to_cost
anchor_to_sll_to_cost = defaultdict(dict)
for sll, anchor_to_cost in sll_to_anchor_to_cost.items():
for anchor, cost in anchor_to_cost.items():
anchor_to_sll_to_cost[anchor][sll] = cost
saveAnchorToSLLToCost(anchor_to_sll_to_cost)
return sll_to_anchor_to_cost, anchor_to_sll_to_cost
def getSLLChannels(slot1_name: str, slot2_name: str) -> List[SLLChannel]:
"""
    get all SLL channels between a slot pair.
    Each channel has an input coor, an output coor, and 24 RX names.
    First get the X coordinates of the 4 laguna columns.
"""
slot1 = Slot(U250_inst, slot1_name)
slot2 = Slot(U250_inst, slot2_name)
i_th_column_range = range(slot1.down_left_x * 2, (slot1.up_right_x+1) * 2)
pair_down_left_y = min(slot1.down_left_y, slot2.down_left_y)
if pair_down_left_y == 2:
sll_bottom_y_range = range(180, 240)
elif pair_down_left_y == 6:
sll_bottom_y_range = range(420, 480)
elif pair_down_left_y == 10:
sll_bottom_y_range = range(660, 720)
else:
assert False
sll_channels = [SLLChannel(y, i) for y in sll_bottom_y_range for i in i_th_column_range]
logging.info(f'SLL channel num: {len(sll_channels)}')
logging.info(f'Total SLL channel capacity: {len(sll_channels) * sll_channels[0].capacity }')
return sll_channels
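# Capacity arithmetic (illustrative): a slot pair two clock-region columns
# wide yields len(i_th_column_range) = 4 laguna columns; with 60 bottom-y
# values per column that is 4 * 60 = 240 channels, i.e. 240 * 20 = 4800
# placeable anchors under the per-channel capacity of 20.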
def placeAnchorToSLLChannel(anchor_to_sll_to_cost, pair_name) -> Dict[str, SLLChannel]:
"""
    run an ILP to map anchors to channels
"""
start_time = time.perf_counter()
get_time_stamp = lambda : time.perf_counter() - start_time
m = Model()
anchor_to_sll_to_var = {}
for anchor, sll_to_cost in anchor_to_sll_to_cost.items():
sll_to_var = {sll : m.add_var(var_type=CONTINUOUS, lb=0, ub=1) for sll in sll_to_cost.keys()}
anchor_to_sll_to_var[anchor] = sll_to_var
sll_to_anchor_to_var = defaultdict(dict)
for anchor, sll_to_var in anchor_to_sll_to_var.items():
for sll, var in sll_to_var.items():
sll_to_anchor_to_var[sll][anchor] = var
# each anchor is placed once
for anchor, sll_to_var in anchor_to_sll_to_var.items():
m += xsum(var for var in sll_to_var.values()) == 1
# limit on sll capacity, currently set to 20/24
for sll, anchor_to_var in sll_to_anchor_to_var.items():
m += xsum(var for var in anchor_to_var.values()) <= sll.capacity
# objective
var_and_cost = []
for anchor, sll_to_cost in anchor_to_sll_to_cost.items():
sll_to_var = anchor_to_sll_to_var[anchor]
for sll in sll_to_cost.keys():
var_and_cost.append((sll_to_var[sll], sll_to_cost[sll]))
m.objective = minimize(xsum(var * cost for var, cost in var_and_cost))
status = m.optimize()
if anchor_to_sll_to_var:
assert status == OptimizationStatus.OPTIMAL or status == OptimizationStatus.FEASIBLE, f'failed in ILP placement for {pair_name}'
anchor_to_sll = {}
for anchor, sll_to_var in anchor_to_sll_to_var.items():
for sll, var in sll_to_var.items():
var_value = round(var.x)
assert abs(var.x - var_value) < 0.000001, var.x
if var_value == 1:
anchor_to_sll[anchor] = sll
return anchor_to_sll
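# Note on the relaxation: the variables are declared CONTINUOUS in [0, 1]
# rather than BINARY. The constraints form a transportation-style LP (one
# assignment row per anchor, one capacity row per channel) whose vertex
# solutions are integral, so an optimal basic solution should already be
# 0/1; the assert on the rounded values above guards that expectation at
# runtime.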
def saveAnchorToSLLToCost(anchor_to_sll_to_cost):
anchor_to_sll_string_to_cost = {}
for anchor, sll_to_cost in anchor_to_sll_to_cost.items():
anchor_to_sll_string_to_cost[anchor] = {sll.getString() : cost for sll, cost in sll_to_cost.items()}
open('debug_anchor_to_bin_to_cost.json', 'w').write(json.dumps(anchor_to_sll_string_to_cost, indent=2))
def _analyzeILPResults(anchor_to_sll_to_cost, anchor_to_selected_bin):
"""
    report how close to optimal the final position chosen for each anchor is
"""
saveAnchorToSLLToCost(anchor_to_sll_to_cost)
ilp_report = {}
for anchor, chosen_bin in anchor_to_selected_bin.items():
ilp_report[anchor] = {}
bin2cost = anchor_to_sll_to_cost[anchor]
all_cost_list = [[cost, bin] for bin, cost in bin2cost.items()]
all_cost_list = sorted(all_cost_list, key=operator.itemgetter(0))
cost_value_list = [x[0] for x in all_cost_list]
ilp_report[anchor]['curr_cost'] = bin2cost[chosen_bin]
ilp_report[anchor]['min_cost'] = all_cost_list[0][0]
ilp_report[anchor]['max_cost'] = all_cost_list[-1][0]
ilp_report[anchor]['rank_of_chosen_bin'] = cost_value_list.index(bin2cost[chosen_bin])
ilp_report[anchor]['total_bin_num'] = len(all_cost_list)
ilp_report[anchor]['bin_location'] = chosen_bin.getString()
optimal_bin = all_cost_list[0][1]
ilp_report[anchor]['optimal_location'] = optimal_bin.getString()
ranks = [anchor_info['rank_of_chosen_bin'] for anchor_info in ilp_report.values()]
if len(ranks):
logging.info(f'average rank of the final placed bins: {sum(ranks) / len(ranks)}')
logging.info(f'worst rank of the final placed bins: {max(ranks)}')
else:
logging.warning(f'no anchors between the pair')
open('ilp_quality_report.json', 'w').write(json.dumps(ilp_report, indent=2))
def placeLagunaAnchors(hub, pair_name: str, anchor_connections: Dict[str, List[Dict[str, str]]]) -> Dict[str, str]:
"""
    separately handle the anchor placement for SLR-crossing pairs.
    The source cannot be too close to the chosen SLL.
    There are 4 laguna columns between a pair of slots.
    Each column has 1440 SLL wires.
    We divide the 1440 SLLs into 60 channels, each of 24 wires.
    Wires in the same channel have the same src/sink positions and are thus deemed equivalent.
    Thus we have 4 * 60 = 240 bins, each with a physical capacity of 24 (20 of which are used).
    Each SLL spans 60 SLICEs, so each bin has an input coor and an output coor that differ by 60.
"""
slot1_name, slot2_name = pair_name.split('_AND_')
assert isPairSLRCrossing(slot1_name, slot2_name)
sll_channels = getSLLChannels(slot1_name, slot2_name)
anchor_to_sll_dir = _get_anchor_2_sll_dir(hub, slot1_name, slot2_name, anchor_connections)
logging.info(f'anchor num: {len(anchor_to_sll_dir.keys())}')
_, anchor_to_sll_to_cost = getSLLChannelToAnchorCost(sll_channels, anchor_connections, anchor_to_sll_dir)
anchor_to_sll = placeAnchorToSLLChannel(anchor_to_sll_to_cost, pair_name)
_analyzeILPResults(anchor_to_sll_to_cost, anchor_to_sll)
anchor_to_laguna_reg = {}
for anchor, sll in anchor_to_sll.items():
anchor_dir = anchor_to_sll_dir[anchor]
anchor_to_laguna_reg[anchor] = sll.placeAnchor(anchor_dir)
return anchor_to_laguna_reg | [
"[email protected]"
] | |
883879f04c62bf4e480cf92b5d51696e945aea20 | fc4fb632da74ba1b535192f26f64cbb3aa124c2d | /tests/scripts/thread-cert/Cert_5_3_08_ChildAddressSet.py | 9613af53472edcfbdbfd5043edf8aaa94ad564c2 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | sbobrowicz/openthread | dc1fd4caed785a5d3ff9365530b0030e3498d3eb | a43fb455d99d3692bdc68aa6d9be96f973a1a4ea | refs/heads/master | 2021-01-16T23:19:11.674806 | 2016-06-24T19:43:02 | 2016-06-24T19:43:02 | 61,918,103 | 1 | 1 | null | 2016-06-24T23:38:19 | 2016-06-24T23:38:18 | null | UTF-8 | Python | false | false | 4,073 | py | #!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ED1 = 2
ED2 = 3
ED3 = 4
ED4 = 5
class Cert_5_3_8_ChildAddressSet(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED2].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED3].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED4].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ED1].set_panid(0xface)
self.nodes[ED1].set_mode('rsn')
self.nodes[ED1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED1].enable_whitelist()
self.nodes[ED2].set_panid(0xface)
self.nodes[ED2].set_mode('rsn')
self.nodes[ED2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED2].enable_whitelist()
self.nodes[ED3].set_panid(0xface)
self.nodes[ED3].set_mode('rsn')
self.nodes[ED3].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED3].enable_whitelist()
self.nodes[ED4].set_panid(0xface)
self.nodes[ED4].set_mode('rsn')
self.nodes[ED4].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED4].enable_whitelist()
def tearDown(self):
for node in self.nodes.itervalues():
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ED1].start()
time.sleep(3)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.nodes[ED2].start()
time.sleep(3)
self.assertEqual(self.nodes[ED2].get_state(), 'child')
self.nodes[ED3].start()
time.sleep(3)
self.assertEqual(self.nodes[ED3].get_state(), 'child')
self.nodes[ED4].start()
time.sleep(3)
self.assertEqual(self.nodes[ED4].get_state(), 'child')
for i in range(2,6):
addrs = self.nodes[i].get_addrs()
for addr in addrs:
if addr[0:4] != 'fe80':
self.nodes[LEADER].ping(addr)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7274b14eb762afe828b515ecea48e3d6adf0ee84 | 818e11a0545de5ed0337e5baa4b92a732bd79521 | /leetcode/python/128_Longest_Consecutive_Sequence.py | 7262c304222f95a03e296ebf7273426408d6570c | [] | no_license | JaySurplus/online_code | 85300fb63dd4020d9135e32dfad5792850d335f6 | 8f44df0bcb521bbc3a7ff2564cbe931e146ae297 | refs/heads/master | 2021-01-20T09:07:30.038815 | 2018-08-21T15:08:01 | 2018-08-21T15:08:01 | 34,469,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | """
128. Longest Consecutive Sequence
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
For example,
Given [100, 4, 200, 1, 3, 2],
The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.
Your algorithm should run in O(n) complexity.
"""
import time
class Solution(object):
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = set(nums)
dic = {}
#for i in nums:
# print i
best = 0
while nums:
m = n = nums.pop()
while m - 1 in nums:
nums.remove(m-1)
m -= 1
while n + 1 in nums:
nums.remove(n+1)
n += 1
best = max(best , n - m +1)
return best
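    # Why this is O(n) overall: each value enters the set once and is removed
    # at most once across all iterations of the inner while-loops, so their
    # total work is bounded by the number of removals rather than being paid
    # per element of the outer loop.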
sol = Solution()
nums = [100,101,102,103 ,5,4,200,6,8,201,7,1,3,2 , 105 ,104]
#nums = [1,-8,7,-2,-4,-4,6,3,-4,0,-7,-1,5,1,-9,-3]
#nums = [1,2,3,4,5,0, -1]
res = sol.longestConsecutive(nums)
print res
| [
"[email protected]"
] | |
4bf857590fcb621dad5d6aade0336448fb6ec833 | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/modules/online_server_facts.py | 5902f477135e736171debe37114a81e8d66abb26 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 35 | py | cloud/online/online_server_facts.py | [
"[email protected]"
] | |
ea3d92ac6eaa0e3765679bde1dcdd3826f9104b9 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /Flask/module/root/__init__.py | 9e57fe84d463cdf6098fd2ee5cf2c0332eda0a49 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | #!env python
# coding:utf-8
from flask import Flask
from hoge.main import hoge
app = Flask(__name__)
app.debug = True
app.config.from_envvar('FLASK_APP_SETTINGS', silent=True)
app.register_module(hoge, url_prefix="/moge")
@app.route("/")
def index():
return("index")
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
221143b22b0808f84df7a4d31d7bc24371fbb9bf | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractTxytranslationsWordpressCom.py | 65323eb27a4c3c955883db9a3025cf3603421cb6 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 570 | py |
def extractTxytranslationsWordpressCom(item):
'''
Parser for 'txytranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
f8957ea5ea0f59fe279970da77af2889af7bfebd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_noting.py | 23cacc23506f5124a72149831f5c2e50cc7fbca8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _NOTING():
def __init__(self,):
self.name = "NOTING"
  self.definitions = ['note']  # assumed placeholder; the word's real definitions are not provided in this file
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['note']
| [
"[email protected]"
] | |
afa762b941f1e50c05f69a041402ec8dac2894e1 | a2c7bc7f0cf5c18ba84e9a605cfc722fbf169901 | /python_1_to_1000/871_Minimum_Number_of_Refueling_Stops.py | cd4663edd331b07e501988b519ace8b0387a1bb5 | [] | no_license | jakehoare/leetcode | 3bf9edd499034ce32be462d4c197af9a8ed53b5d | 05e0beff0047f0ad399d0b46d625bb8d3459814e | refs/heads/master | 2022-02-07T04:03:20.659422 | 2022-01-26T22:03:00 | 2022-01-26T22:03:00 | 71,602,471 | 58 | 38 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/minimum-number-of-refueling-stops/
# A car travels from a starting position to a destination which is target miles east of the starting position.
# Along the way, there are gas stations.
# Each station[i] represents a gas station that is station[i][0] miles east of the starting position,
# and has station[i][1] liters of gas.
# The car starts with an infinite tank of gas, which initially has startFuel liters of fuel in it.
# It uses 1 liter of gas per 1 mile that it drives.
# When the car reaches a gas station, it may stop and refuel, transferring all the gas from the station into the car.
# What is the least number of refueling stops the car must make in order to reach its destination?
# If it cannot reach the destination, return -1.
# Note that if the car reaches a gas station with 0 fuel left, the car can still refuel there.
# If the car reaches the destination with 0 fuel left, it is still considered to have arrived.
# Maintain a heap of fuel at previous stations that has not been used. At each station the total fuel used must not be
# less than the distance. If it is less, use fuel from previous stations starting with the largest amounts. If no more
# fuel is unused, we cannot reach the target.
# Time - O(n log n)
# Space - O(n)
import heapq
class Solution:
def minRefuelStops(self, target, startFuel, stations):
"""
:type target: int
:type startFuel: int
:type stations: List[List[int]]
:rtype: int
"""
stops = 0
fuel = startFuel # total fuel used
past_fuels = [] # heap of unused fuel from previous stations
stations.append([target, 0]) # target is beyond final station
for distance, station_fuel in stations:
while fuel < distance: # cannot reach this station without more fuel
if not past_fuels: # no more unused previous stations
return -1
fuel -= heapq.heappop(past_fuels) # use the previous station with the most fuel
stops += 1
heapq.heappush(past_fuels, -station_fuel) # add this station's fuel to unused fuel
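        # Worked example (LeetCode's sample case): target=100, startFuel=10,
        # stations=[[10,60],[20,30],[30,30],[60,40]]. The car reaches mile 10
        # on its own fuel, then the heap surrenders the 60-liter and 40-liter
        # stations (largest first) to cover miles 20..100, so the answer is 2.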
return stops | [
"[email protected]"
] | |
64a41985166cecbfeb82821809d57b7442637d8d | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/CaptureCreateDTO.py | aab34c8dce4d17455c21624f902a4b43cf45d6cc | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,388 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CaptureCreateDTO(object):
def __init__(self):
self._capture_no = None
self._out_biz_no = None
@property
def capture_no(self):
return self._capture_no
@capture_no.setter
def capture_no(self, value):
self._capture_no = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
def to_alipay_dict(self):
params = dict()
if self.capture_no:
if hasattr(self.capture_no, 'to_alipay_dict'):
params['capture_no'] = self.capture_no.to_alipay_dict()
else:
params['capture_no'] = self.capture_no
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CaptureCreateDTO()
if 'capture_no' in d:
o.capture_no = d['capture_no']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
return o
| [
"[email protected]"
] | |
e5d4969ae671189786a083b8438e5cbbb026b013 | 59fbeea017110472a788218db3c6459e9130c7fe | /maximum-swap/maximum-swap.py | 3e4a1fe1c9171bb242c09a041e623ad88bc2d7e4 | [] | no_license | niufenjujuexianhua/Leetcode | 82b55d9382bc9f63f4d9da9431194e20a4d299f1 | 542c99e038d21429853515f62af51a77deaa4d9c | refs/heads/master | 2022-04-27T16:55:00.035969 | 2022-03-10T01:10:04 | 2022-03-10T01:10:04 | 79,742,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | class Solution:
def maximumSwap(self, num: int) -> int:
A =list(str(num))
ans = A[:]
for i in range(len(A)):
for j in range(i+1,len(A)):
A[i],A[j] = A[j],A[i]
if A>ans:
ans = A[:]
A[i],A[j] = A[j],A[i]
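        # Brute force: try every pair of digit positions (O(n^2) swaps, each
        # with an O(n) comparison/copy) and keep the lexicographically largest
        # arrangement; with at most ~10 digits in a 32-bit int this is cheap.
        # A greedy single-pass solution also exists but is not what this
        # snippet implements.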
return int(''.join(ans)) | [
"[email protected]"
] | |
6b9dec43d43bea442c82ebcbf64509eb70bf7973 | 68d267a3e352e40dd2e21359fabb7c97ce9c26aa | /2SAT/cases.py | 603243d0922ff178a90b672d9cd64b8675e1476a | [] | no_license | danyuanwang/karatsuba_mult | ac2ad60e98c05910036483f8e418b8478b27081f | a357af85e094f5836c2fbdabebf141b027a13076 | refs/heads/master | 2023-06-11T06:43:43.504877 | 2021-07-03T21:05:09 | 2021-07-03T21:05:09 | 285,746,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | import math
class Cases:
def __init__(self, link):
self.size = 0
self.cases = []
handle = open(link)
for line in handle:
a = line.split()
#print(a)
res = [int(i) for i in a]
self.cases.append(res)
self.size += 1
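        # The pass below reads as pure-literal elimination (an assumption
        # about intent, not stated in the file): a literal whose complement
        # never appears can safely be set true, so every clause containing it
        # is satisfied and dropped. Because self.cases.remove() is called
        # while iterating over self.cases, neighbouring clauses can be
        # skipped; the fixed 100-pass outer loop re-scans to compensate.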
for j in range(100):
removableValues = []
absRemovableValues = []
counter = 0
for case in self.cases:
counter += 1
print(j, counter, len(self.cases)," init")
for value in case:
if abs(value) not in absRemovableValues:
removableValues.append(value)
absRemovableValues.append(abs(value))
else:
if -value in removableValues:
removableValues.remove(-value)
#absRemovableValues.remove(abs(value))
for case in self.cases:
for value in case:
if value in removableValues:
self.cases.remove(case)
break
    # test one clause in the set:
    # true if the first literal or the second literal evaluates to true
    # (a negative number indicates logical NOT)
def test_case(self, index, values):
#print(int(self.cases[index][0]))
value1 = values[abs(self.cases[index][0])]
value2 = values[abs(self.cases[index][1])]
if self.cases[index][0] < 0:
value1 = not value1
if self.cases[index][1] < 0:
value2 = not value2
return value1 or value2
    # test all clauses using test_case above; if any clause evaluates to
    # false, return its index, otherwise return -1
def test_all_cases(self, values):
for index in range(len(self.cases)):
result = self.test_case(index, values)
if not result:
return index
return -1
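    # Usage sketch (illustrative; assumes the input file holds one clause of
    # two signed, 1-based variable indices per line):
    #   cases = Cases('clauses.txt')
    #   values = [None] + [True] * num_vars   # index 0 is unused padding
    #   failing = cases.test_all_cases(values)
    #   # failing == -1 means the assignment satisfies every clause;
    #   # otherwise it is the index of the first violated clause.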
| [
"[email protected]"
] | |
96ab6cd72fa5ce049a87e20ed26fe57c4d0a7395 | 30d0ca288a74835836d72804cfbda81e98bfd075 | /core/utils/net_utils_dgcn.py | df07e2e4bc9e3d8b699219e202eb9a316c257bfb | [] | no_license | JoeeYF/GCN-MT | ffe5b0561e7a172c7b7f09f5497640b6a91df63c | 3757af844c94190ad182bfdfed35541b3c30bf56 | refs/heads/master | 2023-04-19T16:33:27.927961 | 2021-04-29T11:21:43 | 2021-04-29T11:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,698 | py | import os
import time
import glob
import cv2
import csv
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import albumentations
import pandas as pd
from .tools import *
from .metric_utils import *
from .config import *
from .ramps import *
from core.modules.losses.mse_loss import cls_mse_loss, att_mse_loss, relation_mse_loss
from core.modules.feature_queue import FeatureQueue
from core.data.custom_dataset import *
mask_mse_loss_func = att_mse_loss
consistency_criterion_cls = cls_mse_loss
consistency_criterion_att = att_mse_loss
# sigma_loss_func = nn.SmoothL1Loss()
label_list = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
label_to_num = {name: index for index, name in enumerate(label_list)}
num_to_label = {v: k for k, v in label_to_num.items()}
global_step = 0
def get_current_consistency_cls_weight(epoch, config):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return config['consistency_cls'] * sigmoid_rampup(epoch, config['consistency_rampup'], type='cls')
def get_current_consistency_att_weight(epoch, config):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
if epoch < config['consistency_start_epoch']:
return 0.0
else:
return config['consistency_att'] * sigmoid_rampup(epoch, config['consistency_rampup'], type='att')
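# Illustrative numbers, assuming the usual mean-teacher ramp
# sigmoid_rampup(t, T) = exp(-5 * (1 - t/T)**2) (the real implementation
# lives in .ramps and is not shown here): the weight starts at ~0.7% of its
# ceiling at epoch 0, reaches ~29% at T/2, and 100% from epoch T onwards.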
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
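# EMA warm-up behaviour: because alpha = min(1 - 1/(global_step + 1), alpha),
# the effective alpha is 0 at step 0 (the teacher becomes an exact copy of
# the student), 0.9 at step 9, 0.99 at step 99, after which the configured
# ema_decay takes over as the cap.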
def prepare_net(config, model, GCNModel, _use='train'):
# img_size = (config['img_size'], config['img_size'])
def worker_init_fn(worker_id):
random.seed(config['seed']+worker_id)
if _use == 'train':
if config['optim'] == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), config['lr'], weight_decay=config['weight_decay'])
gcn_optimizer = torch.optim.Adam(GCNModel.parameters(), config['lr'], weight_decay=config['weight_decay'])
if config['optim'] == 'RMSprop':
optimizer = torch.optim.RMSprop(model.parameters(), config['lr'], weight_decay=config['weight_decay'])
gcn_optimizer = torch.optim.RMSprop(GCNModel.parameters(), config['lr'], weight_decay=config['weight_decay'])
elif config['optim'] == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), config['lr'], momentum=config['momentum'],
weight_decay=config['weight_decay'], nesterov=config['nesterov'])
gcn_optimizer = torch.optim.SGD(GCNModel.parameters(), config['lr'], momentum=config['momentum'],
weight_decay=config['weight_decay'], nesterov=config['nesterov'])
folds = [fold for fold in range(config['n_fold'])]
train_dataset = CustomDataset('train', config['DataRoot'], config['TrainFold'], None,
transform=albumentations.Compose([
albumentations.Resize(
config['img_size'], config['img_size']),
albumentations.OneOf([
# albumentations.RandomGamma(gamma_limit=(60, 120), p=0.9),
albumentations.RandomBrightnessContrast(brightness_limit=0.05,
contrast_limit=0.05, p=0.9),
], p=0.5),
albumentations.OneOf([
albumentations.Blur(
blur_limit=4, p=1),
# albumentations.MotionBlur(blur_limit=4, p=1),
# albumentations.MedianBlur(
# blur_limit=4, p=1)
], p=0.5),
albumentations.HorizontalFlip(p=0.5),
albumentations.ShiftScaleRotate(shift_limit=0.01, scale_limit=0.01,
rotate_limit=3,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_CONSTANT, p=0.5),
# albumentations.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0)
]),
)
labeled_df = pd.read_csv(config['TrainFold'])
labeled_fold = [i for i in config['label_fold']]
labeled_df = labeled_df[labeled_df.fold_label.isin(labeled_fold)]
labeled_fold_name = labeled_df.image.tolist()
labeled_idxs, unlabeled_idxs = relabel_dataset(
train_dataset, labeled_fold_name)
batch_sampler = TwoStreamBatchSampler(
unlabeled_idxs, labeled_idxs, config['batchsize'], config['label_bs'])
train_loader = torch.utils.data.DataLoader(
train_dataset, num_workers=config['num_workers'], batch_sampler=batch_sampler, pin_memory=True, worker_init_fn=worker_init_fn)
        # Count the number of samples per class in the train dataset
# all_label = np.array([label for _, _, label, _, _ in train_dataset])
# class_sample_count = np.array([len(np.where(all_label == t)[0]) for t in np.unique(all_label)])
# for index in range(len(config['Data_CLASSES'])):
# print("Train class {}: Num {}".format(index, class_sample_count[index]))
valid_dataset = CustomDataset('valid', config['DataRoot'], config['ValidFold'], None,
transform=albumentations.Compose([
albumentations.Resize(
config['img_size'], config['img_size']),
])
)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=10, shuffle=False,
num_workers=config['num_workers'], drop_last=False,
pin_memory=True, worker_init_fn=worker_init_fn)
        # Count the number of samples per class in the valid dataset
# all_label = np.array([label for _, _, label, _, _ in valid_dataset])
# class_sample_count = np.array([len(np.where(all_label == t)[0]) for t in np.unique(all_label)])
# for index in range(len(config['Data_CLASSES'])):
# print("Valid class {}: Num {}".format(index, class_sample_count[index]))
return optimizer, gcn_optimizer, train_loader, valid_loader
elif _use == 'infer':
infer_dataset = CustomDataset('infer', config['DataRoot'], config['TestFold'], None,
transform=albumentations.Compose([
albumentations.Resize(
config['img_size'], config['img_size']),
])
)
infer_loader = torch.utils.data.DataLoader(infer_dataset, batch_size=10, shuffle=False,
num_workers=config['num_workers'], drop_last=False,
pin_memory=True, worker_init_fn=worker_init_fn)
return infer_loader
def train_net(visualizer, optimizer, gcn_optimizer, train_loader, val_loader, model, config):
best_metric_dict = {i: 0 for i in ['acc', 'bac', 'auc', 'f1', 'recall', 'tiou', 'tior', 'acc_epoch']}
cls_criterion = nn.NLLLoss()
if config['lr_decay'] == None:
lr_decay = 0.1
else:
lr_decay = config['lr_decay']
for epoch in range(1, config['num_epoch']+1):
adjust_learning_rate(optimizer, epoch - 1, config['num_epoch'], config['lr'], config['lr_decay_freq'], lr_decay)
# adjust_learning_rate(gcn_optimizer, epoch - 21, config['num_epoch'], config['lr'], config['lr_decay_freq'], lr_decay)
train(visualizer, train_loader, model, optimizer, gcn_optimizer, epoch, config, cls_criterion)
if (epoch) % config['valid_freq'] == 0:
best_metric_dict = valid_net(val_loader, model, config, best_metric_dict, epoch)
logging.info('Valid-Cls: Best ACC update to: {:.4f}, from Epoch {}'.format(best_metric_dict['acc'], best_metric_dict['acc_epoch']))
logging.info('Valid-Cls: Best BAC update to: {:.4f}'.format(best_metric_dict['bac']))
logging.info('Valid-Cls: Best AUC update to: {:.4f}'.format(best_metric_dict['auc']))
logging.info('Valid-Cls: Best F1 update to: {:.4f}'.format(best_metric_dict['f1']))
            logging.info('Valid-Cls: Best recall update to: {:.4f}'.format(best_metric_dict['recall']))
logging.info('Valid-Cls: Best TIOU update to: {:.4f}'.format(best_metric_dict['tiou']))
logging.info('Valid-Cls: Best TIOR update to: {:.4f}'.format(best_metric_dict['tior']))
def valid_net(val_loader, model, config, best_metric_dict, epoch):
result_s, result_t, result_gcn, TIOU, TIOR = valid(val_loader, model, config)
StudentModel, TeacherModel, GCNStudentModel, GCNTeacherModel = model
m_acc_s, all_acc_s, m_auc_s, all_auc_s, m_recall_s, all_recall_s, m_f1_s, all_f1_s, m_bac_s, all_bac_s = result_s
m_acc_t, all_acc_t, m_auc_t, all_auc_t, m_recall_t, all_recall_t, m_f1_t, all_f1_t, m_bac_t, all_bac_t = result_t
m_acc_gcn, all_acc_gcn, m_auc_gcn, all_auc_gcn, m_recall_gcn, all_recall_gcn, m_f1_gcn, all_f1_gcn, m_bac_gcn, all_bac_gcn = result_gcn
TIOU_s, TIOU_t = TIOU
TIOR_s, TIOR_t = TIOR
mTIOU_s = 0.
mTIOU_t = 0.
assert TIOU_s.shape[1] == TIOU_t.shape[1], "TIOU dimension error"
len_TIOU = TIOU_s.shape[1]
for idx in range(len(config['Data_CLASSES'])):
mTIOU_s += TIOU_s[idx].sum() / float(len_TIOU)
mTIOU_t += TIOU_t[idx].sum() / float(len_TIOU)
mTIOU_s /= float(len(config['Data_CLASSES']))
mTIOU_t /= float(len(config['Data_CLASSES']))
mTIOR_s = 0.
mTIOR_t = 0.
assert TIOR_s.shape[1] == TIOR_t.shape[1], "TIOR dimension error"
len_TIOR = TIOR_s.shape[1]
for idx in range(len(config['Data_CLASSES'])):
mTIOR_s += TIOR_s[idx].sum() / float(len_TIOR)
mTIOR_t += TIOR_t[idx].sum() / float(len_TIOR)
mTIOR_s /= float(len(config['Data_CLASSES']))
mTIOR_t /= float(len(config['Data_CLASSES']))
logging.info('[Student Model]')
logging.info('Valid-Cls: Mean ACC: {:.4f}, Mean BAC: {:.4f}, Mean AUC: {:.4f}, Mean F1: {:.4f}, Mean recall: {:.4f}, Mean TIoU: {:.4f}, Mean TIoR: {:.4f}'.format(m_acc_s,
m_bac_s, m_auc_s, m_f1_s, m_recall_s, mTIOU_s, mTIOR_s))
print_result('Valid-Cls: ACC for All Classes: ', all_acc_s, config['Data_CLASSES'])
print_result('Valid-Cls: BAC for All Classes: ', all_bac_s, config['Data_CLASSES'])
print_result('Valid-Cls: AUC for All Classes: ', all_auc_s, config['Data_CLASSES'])
print_result('Valid-Cls: F1 for All Classes: ', all_f1_s, config['Data_CLASSES'])
print_result('Valid-Cls: recall for All Classes: ', all_recall_s, config['Data_CLASSES'])
print_thresh_result('Valid-TIoU: ', TIOU_s, thresh_TIOU, config['Data_CLASSES'])
print_thresh_result('Valid-TIoR: ', TIOR_s, thresh_TIOR, config['Data_CLASSES'])
logging.info('[Teacher Model]')
logging.info('Valid-Cls: Mean ACC: {:.4f}, Mean BAC: {:.4f}, Mean AUC: {:.4f}, Mean F1: {:.4f}, Mean recall: {:.4f}, Mean TIoU: {:.4f}, Mean TIoR: {:.4f}'.format(m_acc_t,
m_bac_t, m_auc_t, m_f1_t, m_recall_t, mTIOU_t, mTIOR_t))
print_result('Valid-Cls: ACC for All Classes: ', all_acc_t, config['Data_CLASSES'])
print_result('Valid-Cls: BAC for All Classes: ', all_bac_t, config['Data_CLASSES'])
print_result('Valid-Cls: AUC for All Classes: ', all_auc_t, config['Data_CLASSES'])
print_result('Valid-Cls: F1 for All Classes: ', all_f1_t, config['Data_CLASSES'])
print_result('Valid-Cls: recall for All Classes: ', all_recall_t, config['Data_CLASSES'])
print_thresh_result('Valid-TIoU: ', TIOU_t, thresh_TIOU, config['Data_CLASSES'])
print_thresh_result('Valid-TIoR: ', TIOR_t, thresh_TIOR, config['Data_CLASSES'])
logging.info('[GCN Model]')
logging.info('Valid-Cls: Mean ACC: {:.4f}, Mean BAC: {:.4f}, Mean AUC: {:.4f}, Mean F1: {:.4f}, Mean recall: {:.4f}'.format(m_acc_gcn, m_bac_gcn, m_auc_gcn, m_f1_gcn, m_recall_gcn))
print_result('Valid-Cls: ACC for All Classes: ', all_acc_gcn, config['Data_CLASSES'])
print_result('Valid-Cls: BAC for All Classes: ', all_bac_gcn, config['Data_CLASSES'])
print_result('Valid-Cls: AUC for All Classes: ', all_auc_gcn, config['Data_CLASSES'])
print_result('Valid-Cls: F1 for All Classes: ', all_f1_gcn, config['Data_CLASSES'])
print_result('Valid-Cls: recall for All Classes: ', all_recall_gcn, config['Data_CLASSES'])
m_acc = max(m_acc_s, m_acc_t, m_acc_gcn)
m_recall = max(m_recall_s, m_recall_t, m_recall_gcn)
m_bac = max(m_bac_s, m_bac_t, m_bac_gcn)
m_auc = max(m_auc_s, m_auc_t, m_auc_gcn)
m_f1 = max(m_f1_s, m_f1_t, m_f1_gcn)
m_tiou = max(mTIOU_s, mTIOU_t)
m_tior = max(mTIOR_s, mTIOR_t)
if m_acc > best_metric_dict['acc']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='acc', best=m_acc_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='acc', best=m_acc_t)
save_checkpoint(GCNStudentModel, 'G_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='acc', best=m_acc_gcn)
best_metric_dict['acc'] = m_acc
best_metric_dict['acc_epoch'] = epoch
if m_recall >= best_metric_dict['recall']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='recall', best=m_recall_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='recall', best=m_recall_t)
save_checkpoint(GCNStudentModel, 'G_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='recall', best=m_recall_gcn)
best_metric_dict['recall'] = m_recall
if m_bac >= best_metric_dict['bac']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='bac', best=m_recall_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='bac', best=m_recall_t)
save_checkpoint(GCNStudentModel, 'G_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='bac', best=m_recall_gcn)
best_metric_dict['bac'] = m_bac
if m_auc >= best_metric_dict['auc']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='auc', best=m_auc_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='auc', best=m_auc_t)
save_checkpoint(GCNStudentModel, 'G_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='auc', best=m_auc_gcn)
best_metric_dict['auc'] = m_auc
if m_f1 >= best_metric_dict['f1']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='f1', best=m_f1_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='f1', best=m_f1_t)
save_checkpoint(GCNStudentModel, 'G_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='f1', best=m_f1_gcn)
best_metric_dict['f1'] = m_f1
if m_tiou >= best_metric_dict['tiou']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='tiou', best=mTIOU_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='tiou', best=mTIOU_t)
best_metric_dict['tiou'] = m_tiou
if m_tior >= best_metric_dict['tior']:
save_checkpoint(StudentModel, 'S_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='tior', best=mTIOR_s)
save_checkpoint(TeacherModel, 'T_fold' + str(config['label_fold']) + '_' + config['arch'], epoch, config['base_dir'], _best='tior', best=mTIOR_t)
best_metric_dict['tior'] = m_tior
return best_metric_dict
def train(visualizer, train_loader, model, optimizer, gcn_optimizer, epoch, config, cls_criterion):
global global_step
StudentModel, TeacherModel, GCNStudentModel, GCNTeacherModel = model
losses = AverageMeter()
cls_losses = AverageMeter()
attmse_losses = AverageMeter()
attbound_losses = AverageMeter()
src_losses = AverageMeter()
consiscls_losses = AverageMeter()
consisatt_losses = AverageMeter()
batch_time = AverageMeter()
cls_accs = AverageMeter()
cls_accs_gcn = AverageMeter()
cls_AUCs = AverageMeter()
cls_F1s = AverageMeter()
gcn_cls_losses = AverageMeter()
num_classes = len(config['Data_CLASSES'])
StudentModel.train()
TeacherModel.train()
GCNStudentModel.train()
GCNTeacherModel.train()
end = time.time()
StudentFeatureQueue = FeatureQueue(config, 2)
TeacherFeatureQueue = FeatureQueue(config, 2)
for i, (input, ema_input, label, flags, name) in enumerate(train_loader):
with torch.autograd.set_detect_anomaly(True):
image1, masks1 = input
image2, masks2 = ema_input
im_h = image1.size(2)
im_w = image1.size(3)
bs = image1.size(0)
label_bs = config['label_bs']
visualizer.reset()
visual_ret = OrderedDict()
errors_ret = OrderedDict()
image1 = image1.cuda()
masks1 = masks1.cuda()
image2 = image2.cuda()
# masks2 = masks2.cuda()
masks1 = masks1.unsqueeze(1)
# masks2 = masks2.unsqueeze(1)
label = label.cuda()
# flags = flags.cuda()
visual_ret['input'] = image1
masks_vis = visual_masks(masks1, im_h, im_w)
visual_ret['mask'] = masks_vis
output_s, cam_refined_s, feature_s = StudentModel(image1)
output_t, cam_refined_t, feature_t = TeacherModel(image2)
# StudentFeatureQueue.enqueue(feature_s, label)
# TeacherFeatureQueue.enqueue(feature_s, label)
output_feature_gcns, output_gcns = GCNStudentModel(feature_s, feature_s)
output_feature_gcnt, output_gcnt = GCNStudentModel(feature_t, feature_t)
class_idx = label.cpu().long().numpy()
for index, idx in enumerate(class_idx):
tmp1 = cam_refined_s[index, idx, :, :].unsqueeze(0).unsqueeze(1)
tmp2 = cam_refined_t[index, idx, :, :].unsqueeze(0).unsqueeze(1)
if index == 0:
cam_refined_class_s = tmp1
cam_refined_class_t = tmp2
else:
cam_refined_class_s = torch.cat((cam_refined_class_s, tmp1), dim=0)
cam_refined_class_t = torch.cat((cam_refined_class_t, tmp2), dim=0)
cam_refined_s = cam_refined_class_s
cam_refined_t = cam_refined_class_t
# Classification
probe = torch.softmax(output_s, dim=1)
cls_loss = cls_criterion(torch.log(probe[:label_bs]), label[:label_bs])
# Attention
# MSE loss
mask_loss = mask_mse_loss_func(masks1[:label_bs], cam_refined_s[:label_bs])
# Bound loss
# bound_loss = torch.exp(torch.tensor(1) - torch.min(masks1[:label_bs], cam_refined_s[:label_bs]).sum((2, 3)) / torch.clamp(cam_refined_s[:label_bs].sum((2, 3)), min=1e-5))
bound_loss = torch.tensor(1) - torch.min(masks1[:label_bs], cam_refined_s[:label_bs]).sum((2, 3)) / torch.clamp(cam_refined_s[:label_bs].sum((2, 3)), min=1e-5)
bound_loss = bound_loss.sum() / bs
gcams_vis = visual_masks(cam_refined_s.float(), im_h, im_w)
visual_ret['attention'] = gcams_vis
# Attention Consistency
consistency_weight_att = get_current_consistency_att_weight(epoch, config)
consistency_loss_att = consistency_weight_att * consistency_criterion_att(cam_refined_s[label_bs:], cam_refined_t[label_bs:])
# Classification Consistency
consistency_weight_cls = get_current_consistency_cls_weight(epoch, config)
            consistency_loss_cls = consistency_weight_cls * consistency_criterion_cls(output_s, output_t)
# SRC Loss
consistency_relation_dist = torch.sum(relation_mse_loss(output_feature_gcns, output_feature_gcnt)) / bs
src_loss = consistency_weight_att * consistency_relation_dist*config['src_weight']
# GCN Classification
gcn_probe = torch.softmax(output_gcns, dim=1)
gcn_cls_loss = cls_criterion(torch.log(gcn_probe[:label_bs]), label[:label_bs])*config['gcn_weight']
# GCN Classification Consistency
gcn_consistency_loss_cls = consistency_weight_att * consistency_criterion_cls(output_gcns, output_gcnt)*config['gcn_weight']
total_loss = loss_cls * cls_loss + (loss_masks * mask_loss + loss_bound * bound_loss)*config['attention_weight']
if epoch >= config['gcn_start_epoch']:
total_loss = total_loss+gcn_cls_loss
if epoch >= config['consistency_start_epoch']:
total_loss = total_loss + consistency_loss_cls + consistency_loss_att + gcn_consistency_loss_cls
errors_ret['ClsLoss'] = float(cls_loss)
errors_ret['AttMseLoss'] = float(mask_loss)
errors_ret['AttBoundLoss'] = float(bound_loss)
errors_ret['ConsisClsLoss'] = float(consistency_loss_cls)
errors_ret['ConsisAttLoss'] = float(consistency_loss_att)
errors_ret['SRCLoss'] = float(src_loss)
errors_ret['GCNClsLoss'] = float(gcn_cls_loss)
errors_ret['Loss'] = float(total_loss)
losses.update(total_loss.item(), bs)
cls_losses.update(cls_loss.item(), bs)
attmse_losses.update(mask_loss.item(), bs)
attbound_losses.update(bound_loss.item(), bs)
consiscls_losses.update(consistency_loss_cls.item(), bs)
consisatt_losses.update(consistency_loss_att.item(), bs)
src_losses.update(src_loss.item(), bs)
gcn_cls_losses.update(gcn_cls_loss.item(), bs)
if epoch >= config['consistency_start_epoch']:
optimizer.zero_grad()
gcn_optimizer.zero_grad()
total_loss.backward()
gcn_optimizer.step()
optimizer.step()
elif epoch >= config['gcn_start_epoch']:
# optimizer.zero_grad()
gcn_optimizer.zero_grad()
total_loss.backward()
gcn_optimizer.step()
# optimizer.step()
else:
optimizer.zero_grad()
# gcn_optimizer.zero_grad()
total_loss.backward()
# gcn_optimizer.step()
optimizer.step()
global_step += 1
if epoch < config['gcn_start_epoch'] or epoch >= config['consistency_start_epoch']:
update_ema_variables(StudentModel, TeacherModel, config['ema_decay'], global_step)
if epoch >= config['gcn_start_epoch']:
update_ema_variables(GCNStudentModel, GCNTeacherModel, config['ema_decay'], global_step)
m_acc, _ = calculate_acc(probe.cpu().detach().numpy(), label.cpu().detach().numpy(), config)
cls_accs.update(m_acc, bs)
m_acc_gcn, _ = calculate_acc(gcn_probe.cpu().detach().numpy(), label.cpu().detach().numpy(), config)
cls_accs_gcn.update(m_acc_gcn, bs)
m_auc, _ = calculate_auc(probe.cpu().detach().numpy(), label.cpu().detach().numpy(), config)
cls_AUCs.update(m_auc, bs)
m_f1, _ = calculate_f1(probe.cpu().detach().numpy(), label.cpu().detach().numpy(), config)
cls_F1s.update(m_f1, bs)
batch_time.update(time.time() - end)
end = time.time()
if i % config['print_freq'] == 0:
logging.info('Epoch: [{}][{}/{}]\t'
'Lr: {:.5f} '
'GCNLr: {:.5f} '
'ConsistencyWeightAtt: {:.4f} '
'ClsAcc: {cls_acc.val:.4f} ({cls_acc.avg:.4f}) '
'ClsAccg: {cls_accg.val:.4f} ({cls_accg.avg:.4f}) '
'Loss: {loss.val:.4f} ({loss.avg:.4f}) '
'ClsLoss: {cls_loss.val:.4f} ({cls_loss.avg:.4f}) '
'AttMseloss: {attmse_loss.val:.4f} ({attmse_loss.avg:.4f}) '
'AttBndLoss: {attbnd_loss.val:.4f} ({attbnd_loss.avg:.4f}) '
'ConsisClsLoss: {concls_loss.val:.4f} ({concls_loss.avg:.4f}) '
'ConsisAttLoss: {conatt_loss.val:.4f} ({conatt_loss.avg:.4f}) '
'SRCLoss: {src_loss.val:.4f} ({src_loss.avg:.4f}) '
'GCNClsLoss: {gcn_cls_loss.val:.4f} ({gcn_cls_loss.avg:.4f}) '.format(
epoch, i, len(train_loader), optimizer.param_groups[0]['lr'], gcn_optimizer.param_groups[0]['lr'], consistency_weight_att, cls_acc=cls_accs, cls_accg=cls_accs_gcn, loss=losses, cls_loss=cls_losses, attmse_loss=attmse_losses,
attbnd_loss=attbound_losses, concls_loss=consiscls_losses, conatt_loss=consisatt_losses, src_loss=src_losses, gcn_cls_loss=gcn_cls_losses))
if config['display_id'] > 0:
visualizer.plot_current_losses(epoch, float(i) / float(len(train_loader)), errors_ret)
if i % config['display_freq'] == 0:
visualizer.display_current_results(visual_ret, class_idx[0], epoch, save_result=False)
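# Schedule recap, as implemented above: before gcn_start_epoch only the
# backbone optimizer steps and only the backbone teacher is EMA-updated;
# between gcn_start_epoch and consistency_start_epoch only the GCN optimizer
# steps and only the GCN teacher is EMA-updated (the backbone is frozen);
# from consistency_start_epoch onwards both optimizers step and both
# teachers track their students.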
def valid(valid_loader, model, config):
StudentModel, TeacherModel, GCNStudentModel, GCNTeacherModel = model
batch_time = AverageMeter()
StudentModel.eval()
TeacherModel.eval()
GCNStudentModel.eval()
GCNTeacherModel.eval()
num_classes = len(config['Data_CLASSES'])
counts = np.zeros(num_classes)
TIOU_s = np.zeros((num_classes, len(thresh_TIOU)))
TIOR_s = np.zeros((num_classes, len(thresh_TIOR)))
TIOU_t = np.zeros((num_classes, len(thresh_TIOU)))
TIOR_t = np.zeros((num_classes, len(thresh_TIOR)))
with torch.no_grad():
end = time.time()
for i, (input, ema_input, label, flags, name) in enumerate(valid_loader):
image, masks = input
im_h = image.size(2)
im_w = image.size(3)
bs = image.size(0)
image = image.cuda()
masks = masks.cuda()
label = label.cuda()
masks = masks.unsqueeze(1)
output_s, cam_refined_s, feature_s = StudentModel(image)
output_t, cam_refined_t, feature_t = TeacherModel(image)
# StudentFeatureQueue.enqueue(feature_s, label)
# TeacherFeatureQueue.enqueue(feature_s, label)
output_feature_gcns, output_gcns = GCNStudentModel(feature_s, feature_s)
output_feature_gcnt, output_gcnt = GCNStudentModel(feature_t, feature_t)
class_idx = label.cpu().long().numpy()
for index, idx in enumerate(class_idx):
tmp_s = cam_refined_s[index, idx, :, :].unsqueeze(0).unsqueeze(1)
tmp_t = cam_refined_t[index, idx, :, :].unsqueeze(0).unsqueeze(1)
if index == 0:
cam_refined_class_s = tmp_s
cam_refined_class_t = tmp_t
else:
cam_refined_class_s = torch.cat((cam_refined_class_s, tmp_s), dim=0)
cam_refined_class_t = torch.cat((cam_refined_class_t, tmp_t), dim=0)
cam_refined_s = cam_refined_class_s
cam_refined_t = cam_refined_class_t
probe_s = torch.softmax(output_s, dim=1)
probe_t = torch.softmax(output_t, dim=1)
probe_gcn = torch.softmax(output_gcns, dim=1)
cam_refined_s = cam_refined_s >= cam_thresh
cam_refined_t = cam_refined_t >= cam_thresh
batch_iou_s = single_IOU(cam_refined_s[:, 0, :, :], masks[:, 0, :, :])
batch_ior_s = single_IOR(cam_refined_s[:, 0, :, :], masks[:, 0, :, :])
batch_iou_t = single_IOU(cam_refined_t[:, 0, :, :], masks[:, 0, :, :])
batch_ior_t = single_IOR(cam_refined_t[:, 0, :, :], masks[:, 0, :, :])
# print(TIOU.shape)
# print(TIOR.shape)
for j in range(len(thresh_TIOU)):
if batch_iou_s >= thresh_TIOU[j]:
TIOU_s[class_idx, j] += 1
if batch_iou_t >= thresh_TIOU[j]:
TIOU_t[class_idx, j] += 1
for j in range(len(thresh_TIOR)):
if batch_ior_s >= thresh_TIOR[j]:
TIOR_s[class_idx, j] += 1
if batch_ior_t >= thresh_TIOR[j]:
TIOR_t[class_idx, j] += 1
counts[class_idx] += 1
batch_time.update(time.time() - end)
end = time.time()
if i % (config['print_freq'] * config['batchsize']) == 0:
logging.info('Valid: [{}/{}]\t''Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '.format(i, len(valid_loader), batch_time=batch_time))
if i == 0:
y_true = label.cpu().detach().numpy()
y_pred_s = probe_s.cpu().detach().numpy()
y_pred_t = probe_t.cpu().detach().numpy()
y_pred_gcn = probe_gcn.cpu().detach().numpy()
else:
y_true = np.concatenate((y_true, label.cpu().detach().numpy()), axis=0)
y_pred_s = np.concatenate((y_pred_s, probe_s.cpu().detach().numpy()), axis=0)
y_pred_t = np.concatenate((y_pred_t, probe_t.cpu().detach().numpy()), axis=0)
y_pred_gcn = np.concatenate((y_pred_gcn, probe_gcn.cpu().detach().numpy()), axis=0)
m_acc_s, all_acc_s = calculate_acc(y_pred_s, y_true, config)
m_auc_s, all_auc_s = calculate_auc(y_pred_s, y_true, config)
m_recall_s, all_recall_s = recall(y_pred_s, y_true, config, show_confusion_matrix=True)
m_f1_s, all_f1_s = calculate_f1(y_pred_s, y_true, config)
m_bac_s, all_bac_s = calculate_bac(y_pred_s, y_true, config)
m_acc_t, all_acc_t = calculate_acc(y_pred_t, y_true, config)
m_auc_t, all_auc_t = calculate_auc(y_pred_t, y_true, config)
m_recall_t, all_recall_t = recall(y_pred_t, y_true, config)
m_f1_t, all_f1_t = calculate_f1(y_pred_t, y_true, config)
m_bac_t, all_bac_t = calculate_bac(y_pred_t, y_true, config)
m_acc_gcn, all_acc_gcn = calculate_acc(y_pred_gcn, y_true, config)
m_auc_gcn, all_auc_gcn = calculate_auc(y_pred_gcn, y_true, config)
m_recall_gcn, all_recall_gcn = recall(y_pred_gcn, y_true, config)
m_f1_gcn, all_f1_gcn = calculate_f1(y_pred_gcn, y_true, config)
m_bac_gcn, all_bac_gcn = calculate_bac(y_pred_gcn, y_true, config)
for idx in range(num_classes):
for j in range(len(thresh_TIOU)):
if counts[idx] == 0:
TIOU_s[idx, j] = 0.
TIOU_t[idx, j] = 0.
else:
TIOU_s[idx, j] = float(TIOU_s[idx, j]) / float(counts[idx])
TIOU_t[idx, j] = float(TIOU_t[idx, j]) / float(counts[idx])
for idx in range(num_classes):
for j in range(len(thresh_TIOR)):
if counts[idx] == 0:
TIOR_s[idx, j] = 0.
TIOR_t[idx, j] = 0.
else:
TIOR_s[idx, j] = float(TIOR_s[idx, j]) / float(counts[idx])
TIOR_t[idx, j] = float(TIOR_t[idx, j]) / float(counts[idx])
return [m_acc_s, all_acc_s, m_auc_s, all_auc_s, m_recall_s, all_recall_s, m_f1_s, all_f1_s, m_bac_s, all_bac_s], \
[m_acc_t, all_acc_t, m_auc_t, all_auc_t, m_recall_t, all_recall_t, m_f1_t, all_f1_t, m_bac_t, all_bac_t], \
[m_acc_gcn, all_acc_gcn, m_auc_gcn, all_auc_gcn, m_recall_gcn, all_recall_gcn, m_f1_gcn, all_f1_gcn, m_bac_gcn, all_bac_gcn], \
[TIOU_s, TIOU_t], \
[TIOR_s, TIOR_t]
def infer(infer_loader, model, config):
batch_time = AverageMeter()
model.eval()
num_classes = len(config['Data_CLASSES'])
counts = np.zeros(num_classes)
TIOU = np.zeros((num_classes, len(thresh_TIOU)))
TIOR = np.zeros((num_classes, len(thresh_TIOR)))
all_name = []
with torch.no_grad():
end = time.time()
for i, (input, ema_input, label, flags, name) in enumerate(infer_loader):
all_name = all_name + list(name)
image, masks = input
im_h = image.size(2)
im_w = image.size(3)
bs = image.size(0)
image = image.cuda()
masks = masks.cuda()
label = label.cuda()
masks = masks.unsqueeze(1)
output, cam_refined, cam, = model(image)
class_idx = label.cpu().long().numpy()
for index, idx in enumerate(class_idx):
tmp = cam_refined[index, idx, :, :].unsqueeze(0).unsqueeze(1)
if index == 0:
cam_refined_class = tmp
else:
cam_refined_class = torch.cat((cam_refined_class, tmp), dim=0)
cam_refined = cam_refined_class
probe = torch.softmax(output, dim=1)
cam_refined = cam_refined >= cam_thresh
batch_iou = single_IOU(cam_refined[:, 0, :, :], masks[:, 0, :, :])
batch_ior = single_IOR(cam_refined[:, 0, :, :], masks[:, 0, :, :])
# print(TIOU.shape)
# print(TIOR.shape)
for j in range(len(thresh_TIOU)):
if batch_iou >= thresh_TIOU[j]:
TIOU[class_idx, j] += 1
for j in range(len(thresh_TIOR)):
if batch_ior >= thresh_TIOR[j]:
TIOR[class_idx, j] += 1
counts[class_idx] += 1
batch_time.update(time.time() - end)
end = time.time()
if i % (config['print_freq'] * config['batchsize']) == 0:
logging.info('Infer-Cls: [{}/{}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '.format(
i, len(infer_loader), batch_time=batch_time))
if i == 0:
y_gt = label.cpu().detach().numpy()
y_pred = probe.cpu().detach().numpy()
else:
y_gt = np.concatenate((y_gt, label.cpu().detach().numpy()), axis=0)
y_pred = np.concatenate((y_pred, probe.cpu().detach().numpy()), axis=0)
m_acc, all_acc = calculate_acc(y_pred, y_gt, config)
m_auc, all_auc = calculate_auc(y_pred, y_gt, config)
m_recall, all_recall = recall(y_pred, y_gt, config, show_confusion_matrix=True)
m_f1, all_f1 = calculate_f1(y_pred, y_gt, config)
m_bac, all_bac = calculate_bac(y_pred, y_gt, config)
for idx in range(num_classes):
for j in range(len(thresh_TIOU)):
if counts[idx] == 0:
TIOU[idx, j] = 0.
else:
TIOU[idx, j] = float(TIOU[idx, j]) / float(counts[idx])
for idx in range(num_classes):
for j in range(len(thresh_TIOR)):
if counts[idx] == 0:
TIOR[idx, j] = 0.
else:
TIOR[idx, j] = float(TIOR[idx, j]) / float(counts[idx])
mTIOU = 0.
len_TIOU = TIOU.shape[1]
for idx in range(len(config['Data_CLASSES'])):
mTIOU += TIOU[idx].sum() / float(len_TIOU)
mTIOU /= float(len(config['Data_CLASSES']))
mTIOR = 0.
len_TIOR = TIOR.shape[1]
for idx in range(len(config['Data_CLASSES'])):
mTIOR += TIOR[idx].sum() / float(len_TIOR)
mTIOR /= float(len(config['Data_CLASSES']))
# result_dict = {'Path': all_name, 'G0_Pred': y_pred[:, 0], 'G1_Pred': y_pred[:, 1], 'G2_Pred': y_pred[:, 2], 'Label': y_gt}
# result_df = pd.DataFrame(result_dict)
# result_df.to_csv('./outputs/results/{}.csv'.format(config['model_name']), index=False)
logging.info('Infer-Cls: Mean ACC: {:.4f}, Mean BAC: {:.4f}, Mean AUC: {:.4f}, Mean F1: {:.4f}, Mean recall: {:.4f}, Mean TIoU: {:.4f}, Mean TIoR: {:.4f}'.format(
m_acc, m_bac, m_auc, m_f1, m_recall, mTIOU, mTIOR))
print_result('Infer-Cls: ACC for All Classes: ', all_acc, config['Data_CLASSES'])
print_result('Valid-Cls: BAC for All Classes: ', all_bac, config['Data_CLASSES'])
print_result('Infer-Cls: AUC for All Classes: ', all_auc, config['Data_CLASSES'])
print_result('Infer-Cls: F1 for All Classes: ', all_f1, config['Data_CLASSES'])
print_result('Infer-Cls: recall for All Classes: ', all_recall, config['Data_CLASSES'])
print_thresh_result('Infer-TIoU: ', TIOU, thresh_TIOU, config['Data_CLASSES'])
print_thresh_result('Infer-TIoR: ', TIOR, thresh_TIOR, config['Data_CLASSES'])
def single_IOU(pred, target):
pred_class = pred.data.cpu().contiguous().view(-1)
target_class = target.data.cpu().contiguous().view(-1)
pred_inds = pred_class == 1
target_inds = target_class == 1
# Cast to long to prevent overflows
intersection = (pred_inds[target_inds]).long().sum().item()
union = pred_inds.long().sum().item() + target_inds.long().sum().item() - intersection
iou = float(intersection) / float(max(union, 1))
return iou
def single_IOR(pred, target):
pred_class = pred.data.cpu().contiguous().view(-1)
target_class = target.data.cpu().contiguous().view(-1)
pred_inds = pred_class == 1
target_inds = target_class == 1
# Cast to long to prevent overflows
intersection = (pred_inds[target_inds]).long().sum().item()
union = pred_inds.long().sum().item() + target_inds.long().sum().item() - intersection
iou = float(intersection) / float(max(pred_inds.long().sum().item(), 1))
return iou
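# Relationship between the two metrics (worked numbers for illustration):
# IOU divides the overlap by the union of prediction and target, IOR by the
# predicted area alone. With |pred| = 40, |target| = 100 and overlap 30,
# IOU = 30 / 110 ~ 0.27 while IOR = 30 / 40 = 0.75, so IOR rewards attention
# maps that stay inside the ground-truth mask even when they under-cover it.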
def visual_masks(masks, im_h, im_w):
mask_vis = masks[0, :, :, :].unsqueeze(0).clone()
mask_one = torch.zeros((1, im_h, im_w)).cuda()
mask_one = mask_one + mask_vis[:, 0, :, :]
mask_one[mask_one >= 1] = 1
vis_mask1 = mask_one.clone()
vis_mask2 = mask_one.clone()
vis_mask3 = mask_one.clone()
vis_mask1[vis_mask1 == 1] = palette[1][0]
vis_mask2[vis_mask2 == 1] = palette[1][1]
vis_mask3[vis_mask3 == 1] = palette[1][2]
vis_mask1 = vis_mask1.unsqueeze(1)
vis_mask2 = vis_mask2.unsqueeze(1)
vis_mask3 = vis_mask3.unsqueeze(1)
vis_mask = torch.cat((vis_mask1, vis_mask2, vis_mask3), 1)
return vis_mask
# --- file: LocationAdapter/src/server.py ---
#--coding:utf-8--
import traceback
from datetime import datetime
from logging import getLogger
import gevent
import gevent.socket
from gevent.server import StreamServer
import gevent.event
import gevent.ssl
from mantis.fundamental.utils.importutils import import_class
from mantis.fundamental.application.app import instance
class SocketClientIdentifier(object):
def __init__(self):
        self.unique_id = ''  # may be the unique device identifier of the connected device
self.props = {}
class SocketConnection(object):
def __init__(self,sock,consumer,server=None):
self.server = server
self.sock = sock
self.consumer = consumer
self.datetime = None
self.client_id = SocketClientIdentifier()
def getAddress(self):
return 'RpcConnectionSocket:'+str(self.sock.getsockname())
def open(self):
self.datetime = datetime.now()
return True
def close(self):
if self.sock:
self.sock.close()
self.sock = None
def sendData(self,data):
self.sock.sendall(data)
instance.getLogger().debug( 'sent >> ' + self.hex_dump(data) )
    def hex_dump(self, data):
        dump = ' '.join(map(hex, map(ord, data)))
        return dump
def recv(self):
while True:
try:
d = self.sock.recv(1000)
if not d:
break
except:
# traceback.print_exc()
break
try:
self.consumer.onData(d)
except:
instance.getLogger().error(traceback.format_exc())
# traceback.print_exc()
instance.getLogger().debug( 'socket disconnected!' )
self.sock = None
class DataConsumer(object):
def __init__(self,accumulator,handler):
self.accumulator = accumulator
self.handler = handler
def onData(self,bytes):
messages = self.accumulator.enqueue(bytes)
for message in messages:
self.handler.handle(message)
class Server(object):
def __init__(self):
self.cfgs = None
self.conns = []
self.server = None
@property
def name(self):
return self.cfgs.get('name')
def init(self,cfgs):
self.cfgs = cfgs
return self
def stop(self):
self.server.stop()
def start(self):
ssl = self.cfgs.get('ssl')
if ssl:
self.server = StreamServer((self.cfgs.get('host'),self.cfgs.get('port')),
self._service,keyfile=self.cfgs.get('keyfile'),
certfile=self.cfgs.get('certfile'))
else:
self.server = StreamServer((self.cfgs.get('host'),self.cfgs.get('port')), self._service)
        print 'socket server started!'
        self.server.start()  # start() returns immediately; serve_forever() would block
def _service(self,sock,address):
cfgs = self.cfgs.get('accumulator')
accCls = import_class(cfgs.get('class'))
acc = accCls().init(cfgs)
cfgs = self.cfgs.get('handler')
handlerCls = import_class(cfgs.get('class'))
handler = handlerCls().init(cfgs)
# consumer = DataConsumer(acc,handler)
conn = SocketConnection(sock,handler,self)
self.addConnection(conn)
# handler.setConnection(conn)
handler.setAccumulator(acc)
handler.onConnected(conn,address)
conn.recv()
self.removeConnection(conn)
handler.onDisconnected()
def sendMessage(self,m):
pass
def addConnection(self,conn):
self.conns.append(conn)
def removeConnection(self,conn):
        self.conns.remove(conn)
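# Added sketch (not in the original file): minimal wiring for the Server above.
# The dotted class paths are hypothetical placeholders -- the real accumulator
# and handler classes live elsewhere in this project.
#
# cfgs = {
#     'name': 'location-adapter',
#     'host': '0.0.0.0', 'port': 9100, 'ssl': False,
#     'accumulator': {'class': 'adapter.acc.FrameAccumulator'},
#     'handler': {'class': 'adapter.handler.LocationHandler'},
# }
# Server().init(cfgs).start()  # start() returns; keep the gevent hub running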
# --- file: interfaces/animal/stagnant.py ---
from .aquatic import IAquatic
class IStagnant(IAquatic):
def __init__(self):
super().__init__()
        self.dwell_type = "stillwater"
# --- file: hubspot/crm/objects/feedback_submissions/models/previous_page.py ---
# coding: utf-8
"""
Feedback Submissions
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.objects.feedback_submissions.configuration import Configuration
class PreviousPage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'before': 'str',
'link': 'str'
}
attribute_map = {
'before': 'before',
'link': 'link'
}
def __init__(self, before=None, link=None, local_vars_configuration=None): # noqa: E501
"""PreviousPage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._before = None
self._link = None
self.discriminator = None
self.before = before
if link is not None:
self.link = link
@property
def before(self):
"""Gets the before of this PreviousPage. # noqa: E501
:return: The before of this PreviousPage. # noqa: E501
:rtype: str
"""
return self._before
@before.setter
def before(self, before):
"""Sets the before of this PreviousPage.
:param before: The before of this PreviousPage. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and before is None: # noqa: E501
raise ValueError("Invalid value for `before`, must not be `None`") # noqa: E501
self._before = before
@property
def link(self):
"""Gets the link of this PreviousPage. # noqa: E501
:return: The link of this PreviousPage. # noqa: E501
:rtype: str
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this PreviousPage.
:param link: The link of this PreviousPage. # noqa: E501
:type: str
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PreviousPage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PreviousPage):
return True
return self.to_dict() != other.to_dict()
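# Added example (not part of the generated client): a simple round trip.
# page = PreviousPage(before='cursor-123', link='https://api.hubapi.com/?before=cursor-123')
# page.to_dict()  # -> {'before': 'cursor-123', 'link': 'https://api.hubapi.com/?before=cursor-123'}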
# --- file: sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_02_01/aio/operations/_resolve_private_link_service_id_operations.py ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResolvePrivateLinkServiceIdOperations:
"""ResolvePrivateLinkServiceIdOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def post(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.PrivateLinkResource",
**kwargs: Any
) -> "_models.PrivateLinkResource":
"""Gets the private link service ID for the specified managed cluster.
        Gets the private link service ID for the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters (name, groupId) supplied in order to resolve a private link
service ID.
:type parameters: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateLinkResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.post.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateLinkResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
post.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId'} # type: ignore
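# Added sketch (not part of the generated SDK): the usual entry point is
# ContainerServiceClient, which exposes this operations group; 'credential',
# 'sub_id' and the resource names below are placeholders.
#
# client = ContainerServiceClient(credential, sub_id)
# result = await client.resolve_private_link_service_id.post(
#     resource_group_name='my-rg', resource_name='my-aks',
#     parameters=PrivateLinkResource(name='management', group_id='management'))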
# --- file: venv/lib/python2.7/site-packages/nipype/interfaces/slicer/surface.py ---
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class MergeModelsInputSpec(CommandLineInputSpec):
Model1 = File(position=-3, desc="Model", exists=True, argstr="%s")
Model2 = File(position=-2, desc="Model", exists=True, argstr="%s")
ModelOutput = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Model", argstr="%s")
class MergeModelsOutputSpec(TraitedSpec):
ModelOutput = File(position=-1, desc="Model", exists=True)
class MergeModels(SEMLikeCommandLine):
"""title: Merge Models
category: Surface Models
description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files.
version: $Revision$
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = MergeModelsInputSpec
output_spec = MergeModelsOutputSpec
_cmd = "MergeModels "
_outputs_filenames = {'ModelOutput':'ModelOutput.vtk'}
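# Added example (not in the original file): typical nipype usage of the wrapper
# above; the file names are placeholders and the Slicer 'MergeModels' CLI must
# be on PATH for run() to succeed.
#
# merge = MergeModels()
# merge.inputs.Model1 = 'lh.vtk'
# merge.inputs.Model2 = 'rh.vtk'
# merge.inputs.ModelOutput = 'merged.vtk'
# result = merge.run()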
class ModelToLabelMapInputSpec(CommandLineInputSpec):
distance = traits.Float(desc="Sample distance", argstr="--distance %f")
InputVolume = File(position=-3, desc="Input volume", exists=True, argstr="%s")
surface = File(position=-2, desc="Model", exists=True, argstr="%s")
OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="The label volume", argstr="%s")
class ModelToLabelMapOutputSpec(TraitedSpec):
OutputVolume = File(position=-1, desc="The label volume", exists=True)
class ModelToLabelMap(SEMLikeCommandLine):
"""title: Model To Label Map
category: Surface Models
description: Intersects an input model with an reference volume and produces an output label map.
version: 0.1.0.$Revision: 8643 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap
contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelToLabelMapInputSpec
output_spec = ModelToLabelMapOutputSpec
_cmd = "ModelToLabelMap "
_outputs_filenames = {'OutputVolume':'OutputVolume.nii'}
class GrayscaleModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(position=-2, desc="Volume containing the input grayscale data.", exists=True, argstr="%s")
OutputGeometry = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output that contains geometry model.", argstr="%s")
threshold = traits.Float(desc="Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", argstr="--threshold %f")
name = traits.Str(desc="Name to use for this model.", argstr="--name %s")
smooth = traits.Int(desc="Number of smoothing iterations. If 0, no smoothing will be done.", argstr="--smooth %d")
decimate = traits.Float(desc="Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", argstr="--decimate %f")
splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", argstr="--splitnormals ")
pointnormals = traits.Bool(desc="Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.", argstr="--pointnormals ")
class GrayscaleModelMakerOutputSpec(TraitedSpec):
OutputGeometry = File(position=-1, desc="Output that contains geometry model.", exists=True)
class GrayscaleModelMaker(SEMLikeCommandLine):
"""title: Grayscale Model Maker
category: Surface Models
description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data.
version: 3.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker
license: slicer3
contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = GrayscaleModelMakerInputSpec
output_spec = GrayscaleModelMakerOutputSpec
_cmd = "GrayscaleModelMaker "
_outputs_filenames = {'OutputGeometry':'OutputGeometry.vtk'}
class ProbeVolumeWithModelInputSpec(CommandLineInputSpec):
InputVolume = File(position=-3, desc="Volume to use to 'paint' the model", exists=True, argstr="%s")
InputModel = File(position=-2, desc="Input model", exists=True, argstr="%s")
OutputModel = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output 'painted' model", argstr="%s")
class ProbeVolumeWithModelOutputSpec(TraitedSpec):
OutputModel = File(position=-1, desc="Output 'painted' model", exists=True)
class ProbeVolumeWithModel(SEMLikeCommandLine):
"""title: Probe Volume With Model
category: Surface Models
description: Paint a model by a volume (using vtkProbeFilter).
version: 0.1.0.$Revision: 1892 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel
contributor: Lauren O'Donnell (SPL, BWH)
acknowledgements: BWH, NCIGT/LMI
"""
input_spec = ProbeVolumeWithModelInputSpec
output_spec = ProbeVolumeWithModelOutputSpec
_cmd = "ProbeVolumeWithModel "
_outputs_filenames = {'OutputModel':'OutputModel.vtk'}
class LabelMapSmoothingInputSpec(CommandLineInputSpec):
labelToSmooth = traits.Int(desc="The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", argstr="--labelToSmooth %d")
numberOfIterations = traits.Int(desc="The number of iterations of the level set AntiAliasing algorithm", argstr="--numberOfIterations %d")
maxRMSError = traits.Float(desc="The maximum RMS error.", argstr="--maxRMSError %f")
gaussianSigma = traits.Float(desc="The standard deviation of the Gaussian kernel", argstr="--gaussianSigma %f")
inputVolume = File(position=-2, desc="Input label map to smooth", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Smoothed label map", argstr="%s")
class LabelMapSmoothingOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Smoothed label map", exists=True)
class LabelMapSmoothing(SEMLikeCommandLine):
"""title: Label Map Smoothing
category: Surface Models
    description: This filter smooths a binary label map. With a label map as input, this filter runs an anti-aliasing algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map.
version: 1.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing
contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research.
"""
input_spec = LabelMapSmoothingInputSpec
output_spec = LabelMapSmoothingOutputSpec
_cmd = "LabelMapSmoothing "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
class ModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(position=-1, desc="Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.", exists=True, argstr="%s")
color = File(desc="Color table to make labels to colors and objects", exists=True, argstr="--color %s")
modelSceneFile = traits.Either(traits.Bool, InputMultiPath(File(), ), hash_files=False, desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", argstr="--modelSceneFile %s...")
name = traits.Str(desc="Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.", argstr="--name %s")
    generateAll = traits.Bool(desc="Generate models for all labels in the input volume. Select this option if you want to create all models that correspond to all values in a labelmap volume (the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.", argstr="--generateAll ")
    labels = InputMultiPath(traits.Int, desc="A comma-separated list of label values from which to make models. If you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.", sep=",", argstr="--labels %s")
    start = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will override this).", argstr="--start %d")
end = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.", argstr="--end %d")
skipUnNamed = traits.Bool(desc="Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.", argstr="--skipUnNamed ")
jointsmooth = traits.Bool(desc="This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.", argstr="--jointsmooth ")
smooth = traits.Int(desc="Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ", argstr="--smooth %d")
filtertype = traits.Enum("Sinc", "Laplacian", desc="You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.", argstr="--filtertype %s")
    decimate = traits.Float(desc="Choose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.", argstr="--decimate %f")
    splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements.", argstr="--splitnormals ")
pointnormals = traits.Bool(desc="Turn this flag on if you wish to calculate the normal vectors for the points.", argstr="--pointnormals ")
pad = traits.Bool(desc="Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.", argstr="--pad ")
    saveIntermediateModels = traits.Bool(desc="You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file; you have to load them manually after turning off deleting temporary files in the Python console (View ->Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().", argstr="--saveIntermediateModels ")
debug = traits.Bool(desc="turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)", argstr="--debug ")
class ModelMakerOutputSpec(TraitedSpec):
modelSceneFile = OutputMultiPath(File(exists=True), desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.")
class ModelMaker(SEMLikeCommandLine):
"""title: Model Maker
category: Surface Models
    description: Create 3D surface models from segmented data.<p>Models are imported into Slicer under a model hierarchy node in an MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).</p><p><b>Create Multiple:</b></p><p>If you specify a list of Labels, it will override any start/end label settings.</p><p>If you click<i>Generate All</i>it will override the list of labels and any start/end label settings.</p><p><b>Model Maker Settings:</b></p><p>You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.<br>You can set the flags to split normals or generate point normals in this pane as well.<br>You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:<br><i>slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()</i></p>
version: 4.1
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker
license: slicer4
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelMakerInputSpec
output_spec = ModelMakerOutputSpec
_cmd = "ModelMaker "
_outputs_filenames = {'modelSceneFile':'modelSceneFile.mrml'}
# --- file: clients/opening_quote/high_price.py ---
from gevent import monkey; monkey.patch_all()
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), *(['..' + os.sep] * 2))))
import gevent
from clients.common import morning_client
from datetime import datetime, date, timedelta, time
from morning.back_data import holidays
from morning_server import stock_api, message
from gevent.queue import Queue
from pymongo import MongoClient
from configs import db
from morning.pipeline.converter import dt
import pandas as pd
import numpy as np
import daydata
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import order
# KOSPI dynamic VI (volatility interruption): 3%, KOSDAQ: 6%
# 1. Points of interest: year high (3), today's high (1), yesterday-to-today move (2).
#    From the current price, add the largest candle and check which point is reachable.
# 2. Find the strongest momentum (yesterday-to-today 1-minute amount exceeded within 10 sec).
def find_open_time(one_min_dict):
earlist_time = None
for k, v in one_min_dict.items():
for t in v:
if t['market_type'] == 50:
if earlist_time is None:
earlist_time = t['date']
elif earlist_time > t['date']:
earlist_time = t['date']
break
return earlist_time
def find_candidate(tdate, codes, start_datetime=None):
yesterday = holidays.get_yesterday(tdate)
daydata.load_day_data(yesterday, codes, False)
candidate = []
one_minute_dict = {}
codes = list(filter(lambda x: daydata.has_day_data(x), codes))
for code in codes:
        window_start = datetime.combine(tdate, time(8, 59, 59))  # do not clobber the start_datetime parameter
        ticks = morning_client.get_tick_data_by_datetime(code, window_start, window_start + timedelta(minutes=1))
one_minute_dict[code] = ticks
if start_datetime is None:
start_datetime = find_open_time(one_minute_dict)
print(tdate, 'START TICK TIME', start_datetime)
for code in codes:
is_kospi = morning_client.is_kospi_code(code)
quote_amount = 0
open_price = 0
cname = ''
is_started = False
all_amount = 0
bias_amount = 0
for t in one_minute_dict[code]:
if not is_started:
if t['market_type'] == 50:
if t['time'] > 900:
break
quote_amount = t['cum_amount'] * (10000 if is_kospi else 1000)
open_price = t['current_price']
cname = t['company_name']
is_started = True
else:
if t['date'] > start_datetime + timedelta(seconds=10):
candidate.append({'code': code,
'name': cname,
'ratio': quote_amount / daydata.get_yesterday_amount(code),
'starter_ratio': all_amount / quote_amount,
'bias_amount': bias_amount > 0,
'current_percent': (t['current_price'] - open_price) / open_price * 100.0})
break
all_amount += t['volume'] * t['current_price']
if t['buy_or_sell'] == 49:
bias_amount += t['volume'] * t['current_price']
else:
bias_amount -= t['volume'] * t['current_price']
return candidate, start_datetime
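# Reading of the candidate fields above (added note): 'ratio' compares the
# opening-quote amount with yesterday's traded amount, 'starter_ratio' is the
# first-10-second turnover relative to that quote, 'bias_amount' flags net
# buying pressure, and 'current_percent' is the move from the open price.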
def start_trading(tdate, codes, start_datetime):
for code in codes:
print('start_trading', code)
        code_start = start_datetime + timedelta(seconds=11)  # fixed offset; do not accumulate across codes
        ticks = morning_client.get_tick_data_by_datetime(code, code_start, code_start + timedelta(minutes=10))
order_tick = ticks[0]
order.add_order(code, order_tick, [order_tick['ask_price'] * 1.03])
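        # (added note) the single-element list passed to add_order is read here as
        # a take-profit target at +3% of the first ask; the order module's
        # add_order/check_tick/finalize semantics are inferred from this usage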
for t in ticks[1:-1]:
if not order.check_tick(code, t):
break
order.finalize(code, ticks[-1])
if __name__ == '__main__':
all_codes = morning_client.get_all_market_code() # for is_kospi
#all_codes = ['A326030', 'A002100'] # 8/10 datetime(2020, 8, 10, 8, 59, 59)
#all_codes = ['A128940', 'A060150', 'A005257'] # datetime(2020, 8, 6, 9, 0, 0, 503000)
# 8/7, 8/11 empty
start_dt = datetime(2020, 8, 20).date()
while start_dt <= datetime(2020, 8, 20).date():
if holidays.is_holidays(start_dt) or datetime(2020, 8, 12).date() == start_dt or datetime(2020, 8, 13).date() == start_dt:
start_dt += timedelta(days=1)
continue
tdate = start_dt
candidate, start_datetime = find_candidate(tdate, all_codes, None)
#print(candidate)
sorted_by_ratio = sorted(candidate, key=lambda x: x['ratio'], reverse=True)
sorted_by_ratio = sorted_by_ratio[:20]
sorted_by_ratio = list(filter(lambda x: x['bias_amount'] and 0.5 < x['current_percent'] <= 5, sorted_by_ratio))
#sorted_by_ratio = sorted(sorted_by_ratio, key=lambda x: x['starter_ratio'], reverse=True)
sorted_by_profit = sorted(sorted_by_ratio, key=lambda x: x['current_percent'], reverse=True)
filtered_codes = [t['code'] for t in sorted_by_ratio[:5]]
print(filtered_codes)
start_trading(tdate, filtered_codes, start_datetime)
start_dt += timedelta(days=1)
#df = pd.DataFrame(order._bills)
#df.to_excel('trade_bills.xlsx')
"""
start_trading A326030
ORDER {'code': 'A326030', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 8000), 'bought': 186500, 'target': 189297.49999999997}
{'code': 'A326030', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 8000), 'stime': datetime.datetime(2020, 8, 10, 9, 7, 13, 728000), 'bought': 186500, 'sell': 185000, 'profit': '-1.08', 'reason': 'CUT', 'scount': 0, 'fcount': 1}
start_trading A002100
ORDER {'code': 'A002100', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 204000), 'bought': 16650, 'target': 16899.75}
{'code': 'A002100', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 204000), 'stime': datetime.datetime(2020, 8, 10, 9, 1, 41, 688000), 'bought': 16650, 'sell': 16900, 'profit': '1.22', 'reason': 'PROFIT', 'scount': 1, 'fcount': 1}
start_trading A185750
ORDER {'code': 'A185750', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 47000), 'bought': 177500, 'target': 180162.49999999997}
{'code': 'A185750', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 47000), 'stime': datetime.datetime(2020, 8, 10, 9, 7, 49, 727000), 'bought': 177500, 'sell': 175500, 'profit': '-1.40', 'reason': 'CUT', 'scount': 1, 'fcount': 2}
start_trading A128940
ORDER {'code': 'A128940', 'date': datetime.datetime(2020, 8, 6, 9, 0, 11, 22000), 'bought': 383000, 'target': 388744.99999999994}
{'code': 'A128940', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 11, 22000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 19, 725000), 'bought': 383000, 'sell': 380000, 'profit': '-1.06', 'reason': 'CUT', 'scount': 0, 'fcount': 1}
start_trading A060150
ORDER {'code': 'A060150', 'date': datetime.datetime(2020, 8, 6, 9, 0, 11, 59000), 'bought': 10500, 'target': 10657.499999999998}
{'code': 'A060150', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 11, 59000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 19, 999000), 'bought': 10500, 'sell': 10700, 'profit': '1.62', 'reason': 'PROFIT', 'scount': 1, 'fcount': 1}
start_trading A005257
ORDER {'code': 'A005257', 'date': datetime.datetime(2020, 8, 6, 9, 0, 12, 47000), 'bought': 247000, 'target': 250704.99999999997}
{'code': 'A005257', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 12, 47000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 41, 577000), 'bought': 247000, 'sell': 251500, 'profit': '1.54', 'reason': 'PROFIT', 'scount': 2, 'fcount': 1}
"""
# --- file: Python_codes/p04013/s491767407.py ---
N, A = map(int, input().split())  # N: number of cards; A: desired average
X = tuple(map(int, input().split()))  # values written on the N cards
dp = [[[0]*(50*N+2) for _ in range(N+2)] for _ in range(N+2)]
dp[0][0][0] = 1  # exactly one way to pick 0 cards out of 0 with sum 0
for i in range(N):
for j in range(N+1):
for k in range(50*N+1):
            # if dp[i][j][k]:  # (original note: "not sure about this branch" -- it is just an optional zero-state pruning)
if k+X[i] < 50*N+2:
dp[i+1][j][k] += dp[i][j][k]
dp[i+1][j+1][k+X[i]] += dp[i][j][k]
ans = 0
for i in range(1, N+1):
ans += dp[N][i][i*A]
print(ans)
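# Added note (a reading of the solution above): dp[i][j][k] counts ways to choose
# j cards among the first i whose values sum to k. Each card is either skipped
# (dp[i+1][j][k] += dp[i][j][k]) or taken (dp[i+1][j+1][k+X[i]] += dp[i][j][k]).
# The answer sums dp[N][i][i*A] over i >= 1, since picking i cards totalling
# exactly i*A is the same as picking cards whose average is A.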
# --- file: build/mongodb_store_msgs/devel/lib/python2.7/dist-packages/mongodb_store_msgs/msg/_MoveEntriesAction.py ---
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mongodb_store_msgs/MoveEntriesAction.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import actionlib_msgs.msg
import genpy
import mongodb_store_msgs.msg
import std_msgs.msg
class MoveEntriesAction(genpy.Message):
_md5sum = "603d33caf9a321e4af460957d0a9266a"
_type = "mongodb_store_msgs/MoveEntriesAction"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
MoveEntriesActionGoal action_goal
MoveEntriesActionResult action_result
MoveEntriesActionFeedback action_feedback
================================================================================
MSG: mongodb_store_msgs/MoveEntriesActionGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
MoveEntriesGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: mongodb_store_msgs/MoveEntriesGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# the db to move entries from
string database
# the collections to move entries from
StringList collections
# only entries before rospy.get_rostime() - move_before are moved. if 0, all are moved
duration move_before
# delete moved entries after replication
bool delete_after_move
# query to move entries by
StringPairList query
================================================================================
MSG: mongodb_store_msgs/StringList
string[] data
================================================================================
MSG: mongodb_store_msgs/StringPairList
StringPair[] pairs
================================================================================
MSG: mongodb_store_msgs/StringPair
string first
string second
================================================================================
MSG: mongodb_store_msgs/MoveEntriesActionResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
MoveEntriesResult result
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: mongodb_store_msgs/MoveEntriesResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
================================================================================
MSG: mongodb_store_msgs/MoveEntriesActionFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
MoveEntriesFeedback feedback
================================================================================
MSG: mongodb_store_msgs/MoveEntriesFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# the collections which have been operated on so far
string[] completed
"""
__slots__ = ['action_goal','action_result','action_feedback']
_slot_types = ['mongodb_store_msgs/MoveEntriesActionGoal','mongodb_store_msgs/MoveEntriesActionResult','mongodb_store_msgs/MoveEntriesActionFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
action_goal,action_result,action_feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MoveEntriesAction, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.action_goal is None:
self.action_goal = mongodb_store_msgs.msg.MoveEntriesActionGoal()
if self.action_result is None:
self.action_result = mongodb_store_msgs.msg.MoveEntriesActionResult()
if self.action_feedback is None:
self.action_feedback = mongodb_store_msgs.msg.MoveEntriesActionFeedback()
else:
self.action_goal = mongodb_store_msgs.msg.MoveEntriesActionGoal()
self.action_result = mongodb_store_msgs.msg.MoveEntriesActionResult()
self.action_feedback = mongodb_store_msgs.msg.MoveEntriesActionFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
_x = self.action_goal.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
_x = self.action_goal.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_goal.goal.database
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.action_goal.goal.collections.data)
buff.write(_struct_I.pack(length))
for val1 in self.action_goal.goal.collections.data:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
_x = self
buff.write(_get_struct_2iB().pack(_x.action_goal.goal.move_before.secs, _x.action_goal.goal.move_before.nsecs, _x.action_goal.goal.delete_after_move))
length = len(self.action_goal.goal.query.pairs)
buff.write(_struct_I.pack(length))
for val1 in self.action_goal.goal.query.pairs:
_x = val1.first
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.second
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
_x = self.action_result.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
_x = self.action_result.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_result.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.action_result.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
_x = self.action_feedback.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
_x = self.action_feedback.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_feedback.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.action_feedback.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.action_feedback.feedback.completed)
buff.write(_struct_I.pack(length))
for val1 in self.action_feedback.feedback.completed:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.action_goal is None:
self.action_goal = mongodb_store_msgs.msg.MoveEntriesActionGoal()
if self.action_result is None:
self.action_result = mongodb_store_msgs.msg.MoveEntriesActionResult()
if self.action_feedback is None:
self.action_feedback = mongodb_store_msgs.msg.MoveEntriesActionFeedback()
end = 0
_x = self
start = end
end += 12
(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_goal.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_goal.goal_id.id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.goal.database = str[start:end].decode('utf-8')
else:
self.action_goal.goal.database = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_goal.goal.collections.data = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.action_goal.goal.collections.data.append(val1)
_x = self
start = end
end += 9
(_x.action_goal.goal.move_before.secs, _x.action_goal.goal.move_before.nsecs, _x.action_goal.goal.delete_after_move,) = _get_struct_2iB().unpack(str[start:end])
self.action_goal.goal.delete_after_move = bool(self.action_goal.goal.delete_after_move)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_goal.goal.query.pairs = []
for i in range(0, length):
val1 = mongodb_store_msgs.msg.StringPair()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.first = str[start:end].decode('utf-8')
else:
val1.first = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.second = str[start:end].decode('utf-8')
else:
val1.second = str[start:end]
self.action_goal.goal.query.pairs.append(val1)
_x = self
start = end
end += 12
(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_result.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_result.status.goal_id.id = str[start:end]
start = end
end += 1
(self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.status.text = str[start:end].decode('utf-8')
else:
self.action_result.status.text = str[start:end]
_x = self
start = end
end += 12
(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_feedback.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_feedback.status.goal_id.id = str[start:end]
start = end
end += 1
(self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.status.text = str[start:end].decode('utf-8')
else:
self.action_feedback.status.text = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_feedback.feedback.completed = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.action_feedback.feedback.completed.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
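    # Added example (not part of the generated message code): a minimal
    # serialize/deserialize round trip, assuming genpy is importable; rospy
    # normally drives these methods itself.
    #
    #     from io import BytesIO
    #     msg = MoveEntriesAction()
    #     buff = BytesIO()
    #     msg.serialize(buff)
    #     clone = MoveEntriesAction().deserialize(buff.getvalue())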
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
_x = self.action_goal.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
_x = self.action_goal.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_goal.goal.database
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.action_goal.goal.collections.data)
buff.write(_struct_I.pack(length))
for val1 in self.action_goal.goal.collections.data:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
_x = self
buff.write(_get_struct_2iB().pack(_x.action_goal.goal.move_before.secs, _x.action_goal.goal.move_before.nsecs, _x.action_goal.goal.delete_after_move))
length = len(self.action_goal.goal.query.pairs)
buff.write(_struct_I.pack(length))
for val1 in self.action_goal.goal.query.pairs:
_x = val1.first
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.second
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
_x = self.action_result.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
_x = self.action_result.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_result.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.action_result.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
_x = self.action_feedback.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
_x = self.action_feedback.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.action_feedback.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.action_feedback.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.action_feedback.feedback.completed)
buff.write(_struct_I.pack(length))
for val1 in self.action_feedback.feedback.completed:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.action_goal is None:
self.action_goal = mongodb_store_msgs.msg.MoveEntriesActionGoal()
if self.action_result is None:
self.action_result = mongodb_store_msgs.msg.MoveEntriesActionResult()
if self.action_feedback is None:
self.action_feedback = mongodb_store_msgs.msg.MoveEntriesActionFeedback()
end = 0
_x = self
start = end
end += 12
(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_goal.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_goal.goal_id.id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_goal.goal.database = str[start:end].decode('utf-8')
else:
self.action_goal.goal.database = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_goal.goal.collections.data = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.action_goal.goal.collections.data.append(val1)
_x = self
start = end
end += 9
(_x.action_goal.goal.move_before.secs, _x.action_goal.goal.move_before.nsecs, _x.action_goal.goal.delete_after_move,) = _get_struct_2iB().unpack(str[start:end])
self.action_goal.goal.delete_after_move = bool(self.action_goal.goal.delete_after_move)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_goal.goal.query.pairs = []
for i in range(0, length):
val1 = mongodb_store_msgs.msg.StringPair()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.first = str[start:end].decode('utf-8')
else:
val1.first = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.second = str[start:end].decode('utf-8')
else:
val1.second = str[start:end]
self.action_goal.goal.query.pairs.append(val1)
_x = self
start = end
end += 12
(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_result.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_result.status.goal_id.id = str[start:end]
start = end
end += 1
(self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_result.status.text = str[start:end].decode('utf-8')
else:
self.action_result.status.text = str[start:end]
_x = self
start = end
end += 12
(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
else:
self.action_feedback.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.action_feedback.status.goal_id.id = str[start:end]
start = end
end += 1
(self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.action_feedback.status.text = str[start:end].decode('utf-8')
else:
self.action_feedback.status.text = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.action_feedback.feedback.completed = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.action_feedback.feedback.completed.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
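# Cached struct.Struct instances shared by the (de)serializers above; each
# _get_struct_* helper builds its Struct lazily on first use and reuses it.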
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_2iB = None
def _get_struct_2iB():
global _struct_2iB
if _struct_2iB is None:
_struct_2iB = struct.Struct("<2iB")
return _struct_2iB
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
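# Minimal round-trip sketch for the generated (de)serializers above (assumed
# usage: `MoveEntriesAction` is the message class this module generates, and
# io.BytesIO stands in for the buffer ROS normally supplies):
if __name__ == '__main__':
    from io import BytesIO
    import mongodb_store_msgs.msg
    msg = mongodb_store_msgs.msg.MoveEntriesAction()
    buff = BytesIO()
    msg.serialize(buff)                 # pack every field into the buffer
    clone = mongodb_store_msgs.msg.MoveEntriesAction()
    clone.deserialize(buff.getvalue())  # rebuild the message from raw bytes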
# --- /lib/telepot2/filtering.py (repo: soju6jan2/sjva2_src_obfuscate) ---
def pick(obj, keys):
    def pick1(k):
        # Dicts are indexed by key; any other object by attribute.
        if type(obj) is dict:
            return obj[k]
        else:
            return getattr(obj, k)
    if isinstance(keys, list):
        return [pick1(k) for k in keys]
    else:
        return pick1(keys)


def match(data, template):
    # Recursively match `data` against `template`: dict templates match key by
    # key (a key may be a plain value or a compiled regex), callable templates
    # act as predicates, and anything else is compared by equality.
    if isinstance(template, dict) and isinstance(data, dict):
        def pick_and_match(kv):
            template_key, template_value = kv
            if hasattr(template_key, 'search'):  # compiled regex key
                data_keys = list(filter(template_key.search, data.keys()))
                if not data_keys:
                    return False
            elif template_key in data:
                data_keys = [template_key]
            else:
                return False
            return any(map(lambda data_value: match(data_value, template_value),
                           pick(data, data_keys)))
        return all(map(pick_and_match, template.items()))
    elif callable(template):
        return template(data)
    else:
        return data == template


def match_all(msg, templates):
    return all(map(lambda t: match(msg, t), templates))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
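# Illustrative use of the matchers above (a sketch: the message dict and the
# templates are made up for demonstration):
if __name__ == '__main__':
    message = {'chat': {'type': 'private'}, 'text': '/start'}
    template = {'chat': {'type': 'private'}, 'text': lambda t: t.startswith('/')}
    assert match(message, template)
    assert match_all(message, [template, {'text': '/start'}])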
# --- /IronPythonStubs/release/stubs.min/Autodesk/Revit/UI/Mechanical.py (repo: shnlmn/Rhino-Grasshopper-Scripts, MIT) ---
# encoding: utf-8
# module Autodesk.Revit.UI.Mechanical calls itself Mechanical
# from RevitAPIUI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class DuctFittingAndAccessoryPressureDropUIData(object,IDisposable):
""" The input and output data used by external UI servers for storing UI settings. """
def Dispose(self):
""" Dispose(self: DuctFittingAndAccessoryPressureDropUIData) """
pass
def GetUIDataItems(self):
"""
GetUIDataItems(self: DuctFittingAndAccessoryPressureDropUIData) -> IList[DuctFittingAndAccessoryPressureDropUIDataItem]
Gets all UI data items stored in the UI data.
Returns: An array of UI data items.
"""
pass
def GetUnits(self):
"""
GetUnits(self: DuctFittingAndAccessoryPressureDropUIData) -> Units
Gets units.
Returns: The Units object.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: DuctFittingAndAccessoryPressureDropUIData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: DuctFittingAndAccessoryPressureDropUIData) -> bool
"""
class DuctFittingAndAccessoryPressureDropUIDataItem(object,IDisposable):
""" Each duct fitting or duct accessory FamilyInstance has one DuctFittingAndAccessoryPressureDropUIDataItem. """
def Dispose(self):
""" Dispose(self: DuctFittingAndAccessoryPressureDropUIDataItem) """
pass
def GetDuctFittingAndAccessoryData(self):
"""
GetDuctFittingAndAccessoryData(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> DuctFittingAndAccessoryData
Gets the fitting data stored in the UI data item.
Returns: The fitting data stored in the UI data item.
"""
pass
def GetEntity(self):
"""
GetEntity(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> Entity
Returns the entity set by UI server.
or an invalid entity otherwise.
Returns: The returned Entity.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: DuctFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """
pass
def SetEntity(self,entity):
"""
SetEntity(self: DuctFittingAndAccessoryPressureDropUIDataItem,entity: Entity)
Stores the entity in the UI data item.
entity: The Entity to be stored.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> bool
"""
class IDuctFittingAndAccessoryPressureDropUIServer(IExternalServer):
""" Interface for external servers providing optional UI for duct fitting and duct accessory coefficient calculation. """
def GetDBServerId(self):
"""
GetDBServerId(self: IDuctFittingAndAccessoryPressureDropUIServer) -> Guid
Returns the Id of the corresponding DB server for which this server provides an
optional UI.
Returns: The Id of the DB server.
"""
pass
def ShowSettings(self,data):
"""
ShowSettings(self: IDuctFittingAndAccessoryPressureDropUIServer,data: DuctFittingAndAccessoryPressureDropUIData) -> bool
Shows the settings UI.
data: The input data of the calculation.
Returns: True if the user makes any changes in the UI,false otherwise.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
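# Sketch of a concrete server for the interface above (illustrative only:
# the Guid value and the "no changes" behavior are assumptions):
#
# class MyPressureDropUIServer(IDuctFittingAndAccessoryPressureDropUIServer):
#  def GetDBServerId(self):
#   return Guid('00000000-0000-0000-0000-000000000000') # id of the paired DB server
#  def ShowSettings(self,data):
#   return False # report that the user changed nothing in the UI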
# --- /src/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy_pb2.py (repo: opensvc/igw_envoy, Apache-2.0) ---
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from envoy.config.filter.network.dubbo_proxy.v2alpha1 import route_pb2 as envoy_dot_config_dot_filter_dot_network_dot_dubbo__proxy_dot_v2alpha1_dot_route__pb2
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto',
package='envoy.config.filter.network.dubbo_proxy.v2alpha1',
syntax='proto3',
serialized_pb=_b('\nBenvoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto\x12\x30\x65nvoy.config.filter.network.dubbo_proxy.v2alpha1\x1a<envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\x1a\x14gogoproto/gogo.proto\"\xae\x03\n\nDubboProxy\x12\x1e\n\x0bstat_prefix\x18\x01 \x01(\tB\t\xba\xe9\xc0\x03\x04r\x02 \x01\x12\x61\n\rprotocol_type\x18\x02 \x01(\x0e\x32>.envoy.config.filter.network.dubbo_proxy.v2alpha1.ProtocolTypeB\n\xba\xe9\xc0\x03\x05\x82\x01\x02\x10\x01\x12k\n\x12serialization_type\x18\x03 \x01(\x0e\x32\x43.envoy.config.filter.network.dubbo_proxy.v2alpha1.SerializationTypeB\n\xba\xe9\xc0\x03\x05\x82\x01\x02\x10\x01\x12Z\n\x0croute_config\x18\x04 \x03(\x0b\x32\x44.envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration\x12T\n\rdubbo_filters\x18\x05 \x03(\x0b\x32=.envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter\"L\n\x0b\x44ubboFilter\x12\x17\n\x04name\x18\x01 \x01(\tB\t\xba\xe9\xc0\x03\x04r\x02 \x01\x12$\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*\x19\n\x0cProtocolType\x12\t\n\x05\x44ubbo\x10\x00*!\n\x11SerializationType\x12\x0c\n\x08Hessian2\x10\x00\x42W\n>io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1B\x0f\x44ubboProxyProtoP\x01Z\x02v2b\x06proto3')
,
dependencies=[envoy_dot_config_dot_filter_dot_network_dot_dubbo__proxy_dot_v2alpha1_dot_route__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,gogoproto_dot_gogo__pb2.DESCRIPTOR,])
_PROTOCOLTYPE = _descriptor.EnumDescriptor(
name='ProtocolType',
full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.ProtocolType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Dubbo', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=767,
serialized_end=792,
)
_sym_db.RegisterEnumDescriptor(_PROTOCOLTYPE)
ProtocolType = enum_type_wrapper.EnumTypeWrapper(_PROTOCOLTYPE)
_SERIALIZATIONTYPE = _descriptor.EnumDescriptor(
name='SerializationType',
full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.SerializationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Hessian2', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=794,
serialized_end=827,
)
_sym_db.RegisterEnumDescriptor(_SERIALIZATIONTYPE)
SerializationType = enum_type_wrapper.EnumTypeWrapper(_SERIALIZATIONTYPE)
Dubbo = 0
Hessian2 = 0
_DUBBOPROXY = _descriptor.Descriptor(
name='DubboProxy',
full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stat_prefix', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy.stat_prefix', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\004r\002 \001'))),
_descriptor.FieldDescriptor(
name='protocol_type', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy.protocol_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\005\202\001\002\020\001'))),
_descriptor.FieldDescriptor(
name='serialization_type', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy.serialization_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\005\202\001\002\020\001'))),
_descriptor.FieldDescriptor(
name='route_config', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy.route_config', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dubbo_filters', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy.dubbo_filters', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=257,
serialized_end=687,
)
_DUBBOFILTER = _descriptor.Descriptor(
name='DubboFilter',
full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\004r\002 \001'))),
_descriptor.FieldDescriptor(
name='config', full_name='envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter.config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=689,
serialized_end=765,
)
_DUBBOPROXY.fields_by_name['protocol_type'].enum_type = _PROTOCOLTYPE
_DUBBOPROXY.fields_by_name['serialization_type'].enum_type = _SERIALIZATIONTYPE
_DUBBOPROXY.fields_by_name['route_config'].message_type = envoy_dot_config_dot_filter_dot_network_dot_dubbo__proxy_dot_v2alpha1_dot_route__pb2._ROUTECONFIGURATION
_DUBBOPROXY.fields_by_name['dubbo_filters'].message_type = _DUBBOFILTER
_DUBBOFILTER.fields_by_name['config'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['DubboProxy'] = _DUBBOPROXY
DESCRIPTOR.message_types_by_name['DubboFilter'] = _DUBBOFILTER
DESCRIPTOR.enum_types_by_name['ProtocolType'] = _PROTOCOLTYPE
DESCRIPTOR.enum_types_by_name['SerializationType'] = _SERIALIZATIONTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DubboProxy = _reflection.GeneratedProtocolMessageType('DubboProxy', (_message.Message,), dict(
DESCRIPTOR = _DUBBOPROXY,
__module__ = 'envoy.config.filter.network.dubbo_proxy.v2alpha1.dubbo_proxy_pb2'
# @@protoc_insertion_point(class_scope:envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy)
))
_sym_db.RegisterMessage(DubboProxy)
DubboFilter = _reflection.GeneratedProtocolMessageType('DubboFilter', (_message.Message,), dict(
DESCRIPTOR = _DUBBOFILTER,
__module__ = 'envoy.config.filter.network.dubbo_proxy.v2alpha1.dubbo_proxy_pb2'
# @@protoc_insertion_point(class_scope:envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter)
))
_sym_db.RegisterMessage(DubboFilter)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n>io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1B\017DubboProxyProtoP\001Z\002v2'))
_DUBBOPROXY.fields_by_name['stat_prefix'].has_options = True
_DUBBOPROXY.fields_by_name['stat_prefix']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\004r\002 \001'))
_DUBBOPROXY.fields_by_name['protocol_type'].has_options = True
_DUBBOPROXY.fields_by_name['protocol_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\005\202\001\002\020\001'))
_DUBBOPROXY.fields_by_name['serialization_type'].has_options = True
_DUBBOPROXY.fields_by_name['serialization_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\005\202\001\002\020\001'))
_DUBBOFILTER.fields_by_name['name'].has_options = True
_DUBBOFILTER.fields_by_name['name']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\272\351\300\003\004r\002 \001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
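# Hypothetical construction of the generated message (field names follow the
# descriptor above; running this needs protobuf plus the generated dependency
# modules this file imports):
if __name__ == '__main__':
    cfg = DubboProxy(stat_prefix='ingress_dubbo')
    cfg.protocol_type = Dubbo            # enum value 0
    cfg.serialization_type = Hessian2    # enum value 0
    print(len(cfg.SerializeToString()))  # size of the wire-format encoding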
# --- /code/UI/OpenAPI/python-flask-server/KG2/openapi_server/test/test_entity_controller.py (repo: RTXteam/RTX, MIT/Apache-2.0) ---
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.test import BaseTestCase
class TestEntityController(BaseTestCase):
"""EntityController integration test stubs"""
def test_get_entity(self):
"""Test case for get_entity
Obtain CURIE and synonym information about a search term
"""
query_string = [('q', ["MESH:D014867","NCIT:C34373"])]
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/api/rtxkg2/v1.0/entity',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
# --- /ecloud/code/src/main/python/easted/network/__init__.py (repo: 1026237416/Python) ---
# -*- coding: utf-8 -*-
from exception import *
from network import *
from networkdao import *
from networkhost import *
from subnet import *
from subnetdao import *
from tenant_subnet import *
from tenant_host import *
from common import request_create_ports, request_delete_ports
# --- /supervised_learning/0x03-optimization/12-learning_rate_decay.py (repo: BrianFs04/holbertonschool-machine_learning) ---
#!/usr/bin/env python3
"""learning_rate_decay"""
import tensorflow as tf
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """Creates an inverse-time learning rate decay operation in TensorFlow
    (staircase mode, so the rate decays in discrete steps)."""
    rate_op = tf.train.inverse_time_decay(alpha, global_step, decay_step,
                                          decay_rate, True)
    return rate_op
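# With staircase decay the rate follows
#     alpha / (1 + decay_rate * floor(global_step / decay_step))
# e.g. alpha=1.0, decay_rate=1.0, decay_step=10 gives 1.0 for steps 0-9,
# 0.5 for steps 10-19, 1/3 for steps 20-29, and so on.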
# --- /pkg/clm/DEBIAN/prerm (repo: cloudcache/cc1, Apache-2.0) ---
#!/usr/bin/python
import sys
import subprocess
if __name__ == '__main__':
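    # dpkg invokes maintainer scripts with the requested action
    # (e.g. "remove") as an argument; the branches below dispatch on it.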
if 'remove' in sys.argv:
log = open('/var/log/cc1/cm_install.log', 'a')
r = subprocess.call('cc1_clm_setup_config remove', shell=True, stdout=log)
r = subprocess.call('cc1_clm_setup_db_psql remove', shell=True, stdout=log)
r = subprocess.call('cc1_clm_setup_apache disable', shell=True, stdout=log)
log.close()
sys.exit(0)
elif 'purge' in sys.argv:
log = open('/var/log/cc1/cm_install.log', 'a')
r = subprocess.call('cc1_clm_setup_config purge', shell=True, stdout=log)
r = subprocess.call('cc1_clm_setup_db_psql purge', shell=True, stdout=log)
r = subprocess.call('cc1_clm_setup_apache purge', shell=True, stdout=log)
log.close()
sys.exit(0)
else:
print "Use cc1 tools (cc1_...) to reconfigure services!"
sys.exit(0)
# --- /class6/exercises/ex2a_yaml_inventory.py (repo: bminus87/pyplus_course, Apache-2.0) ---
import pyeapi
import yaml
from getpass import getpass
def yaml_load_devices(filename="arista_devices.yml"):
    # Let any I/O or parse error propagate; open() and yaml.safe_load()
    # already raise meaningful exceptions on failure.
    with open(filename, "r") as f:
        return yaml.safe_load(f)
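# The inventory file is expected to look roughly like this (a sketch: the
# device name and values are assumptions, but each key must be a valid
# pyeapi.client.connect() keyword argument, since main() splats the dict):
#
# arista1:
#   host: arista1.example.com
#   username: admin
#   transport: https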
def main():
devices = yaml_load_devices()
password = getpass()
for name, device_dict in devices.items():
device_dict["password"] = password
connection = pyeapi.client.connect(**device_dict)
device = pyeapi.client.Node(connection)
output = device.enable("show ip arp")
print()
print("-" * 40)
arp_list = output[0]["result"]["ipV4Neighbors"]
for arp_entry in arp_list:
mac_address = arp_entry["hwAddress"]
ip_address = arp_entry["address"]
print("{:^15}{:^5}{:^15}".format(ip_address, "-->", mac_address))
print("-" * 40)
print()
if __name__ == "__main__":
main()
# --- /collective/abovecontentportlets/abovecontentportlets.py (repo: intk/collective.abovecontentportlets) ---
from five import grok
from z3c.form import group, field
from zope import schema
from zope.interface import invariant, Invalid
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from plone.dexterity.content import Container
from plone.directives import dexterity, form
from plone.app.textfield import RichText
from plone.namedfile.field import NamedImage, NamedFile
from plone.namedfile.field import NamedBlobImage, NamedBlobFile
from plone.namedfile.interfaces import IImageScaleTraversable
from plone.dexterity.browser.view import DefaultView
from zope.interface import implementer
from collective.abovecontentportlets import MessageFactory as _
from datetime import date
from zope.component import queryMultiAdapter
from plone.app.layout.viewlets.common import ViewletBase
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import getMultiAdapter
class AboveContentPortletsViewlet(ViewletBase):
index = ViewPageTemplateFile('abovecontentportlets_templates/portlet.pt')
def update(self):
super(AboveContentPortletsViewlet, self).update()
self.year = date.today().year
    def render_abovecontent_portlets(self):
        # Look up the named portlet manager registered for this context,
        # request and view, then render its portlets above the content area.
        portlet_manager = getMultiAdapter(
            (self.context, self.request, self.__parent__),
            name='collective.abovecontentportlets'
        )
        portlet_manager.update()
        return portlet_manager.render()
"[email protected]"
] | |
f2e1711dfdbe726e45e948694eb6a02760f26fb7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_370/ch20_2019_08_19_13_59_53_782660.py | ea6bf7b829c953e0f9b8066ff16007d74678b664 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | NOME=input('Qual o seu nome? ')
NOME = input('What is your name? ')
if NOME == 'Chris':
    print('Everybody hates Chris')
else:
    print('Hello, ' + NOME)
"[email protected]"
] | |
418fc3f76fbbd227adf1da0a03818bf10b7febf0 | 17edb3537087dffeabfd9694c71ddf878088713c | /django-master/tests/model_meta/test.py | 8fd0a6bb330336b36f5f293e094a4d39abdbd213 | [
"BSD-3-Clause"
] | permissive | BrianBGrant/DNDC | dbb9004c7cddbb34fb16ca94992fc76127c4b588 | d0cdec5c5436f6fe1fd55d6bae093c21ccb27f6b | refs/heads/master | 2020-04-15T19:43:33.351799 | 2014-10-02T16:48:44 | 2014-10-02T16:48:44 | 42,128,284 | 2 | 1 | null | 2015-09-08T17:34:42 | 2015-09-08T17:34:41 | null | UTF-8 | Python | false | false | 24,972 | py | from django import test
from django.db.models.fields import related, CharField, Field, FieldDoesNotExist
from django.contrib.contenttypes.fields import GenericRelation
from .models import (
AbstractPerson, BasePerson, Person, Relating, Relation
)
TEST_RESULTS = {
'fields': {
Person: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'data_not_concrete_inherited',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'local_fields': {
Person: [
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'data_not_concrete_inherited',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'local_concrete_fields': {
Person: [
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'many_to_many': {
Person: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
'm2m_base',
'friends_base',
'following_base',
'm2m_inherited',
'friends_inherited',
'following_inherited',
],
BasePerson: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
'm2m_base',
'friends_base',
'following_base',
],
AbstractPerson: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
],
Relating: [
'basepeople',
'basepeople_hidden',
'people',
'people_hidden',
],
},
'many_to_many_with_model': {
Person: [
BasePerson,
BasePerson,
BasePerson,
BasePerson,
BasePerson,
BasePerson,
None,
None,
None,
],
BasePerson: [
None,
None,
None,
None,
None,
None,
],
AbstractPerson: [
None,
None,
None,
],
Relating: [
None,
None,
None,
None,
],
},
'get_all_related_objects_with_model': {
Person: (
('relating_baseperson', BasePerson),
('relating_person', None),
),
BasePerson: (
('person', None),
('relating_baseperson', None),
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_local': {
Person: (
('relating_person', None),
),
BasePerson: (
('person', None),
('relating_baseperson', None)
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_hidden': {
BasePerson: (
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:person', None),
('model_meta:relating_basepeople', None),
('model_meta:relating_basepeople_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Person: (
('model_meta:baseperson_friends_base', BasePerson),
('model_meta:baseperson_friends_base', BasePerson),
('model_meta:baseperson_m2m_base', BasePerson),
('model_meta:baseperson_following_base', BasePerson),
('model_meta:baseperson_following_base', BasePerson),
('model_meta:baseperson_m2m_abstract', BasePerson),
('model_meta:baseperson_friends_abstract', BasePerson),
('model_meta:baseperson_friends_abstract', BasePerson),
('model_meta:baseperson_following_abstract', BasePerson),
('model_meta:baseperson_following_abstract', BasePerson),
('model_meta:relating_basepeople', BasePerson),
('model_meta:relating_basepeople_hidden', BasePerson),
('model_meta:relating', BasePerson),
('model_meta:relating', BasePerson),
('model_meta:person_m2m_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:relating_people', None),
('model_meta:relating_people_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Relation: (
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:person_m2m_inherited', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
),
},
'get_all_related_objects_with_model_hidden_local': {
BasePerson: (
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:person', None),
('model_meta:relating_basepeople', None),
('model_meta:relating_basepeople_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Person: (
('model_meta:person_m2m_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:relating_people', None),
('model_meta:relating_people_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Relation: (
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:person_m2m_inherited', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
),
},
'get_all_related_objects_with_model_proxy': {
BasePerson: (
('person', None),
('relating_baseperson', None),
),
Person: (
('relating_baseperson', BasePerson),
('relating_person', None), ('relating_proxyperson', None),
),
Relation: (
('fk_abstract_rel', None), ('fo_abstract_rel', None),
('fk_base_rel', None), ('fo_base_rel', None),
('fk_concrete_rel', None), ('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_proxy_hidden': {
BasePerson: (
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_friends_base', None),
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_following_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_friends_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:baseperson_following_abstract', None),
('model_meta:person', None),
('model_meta:relating_basepeople', None),
('model_meta:relating_basepeople_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Person: (
('model_meta:baseperson_friends_base', BasePerson),
('model_meta:baseperson_friends_base', BasePerson),
('model_meta:baseperson_m2m_base', BasePerson),
('model_meta:baseperson_following_base', BasePerson),
('model_meta:baseperson_following_base', BasePerson),
('model_meta:baseperson_m2m_abstract', BasePerson),
('model_meta:baseperson_friends_abstract', BasePerson),
('model_meta:baseperson_friends_abstract', BasePerson),
('model_meta:baseperson_following_abstract', BasePerson),
('model_meta:baseperson_following_abstract', BasePerson),
('model_meta:relating_basepeople', BasePerson),
('model_meta:relating_basepeople_hidden', BasePerson),
('model_meta:relating', BasePerson),
('model_meta:relating', BasePerson),
('model_meta:person_m2m_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_friends_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:person_following_inherited', None),
('model_meta:relating_people', None),
('model_meta:relating_people_hidden', None),
('model_meta:relating', None),
('model_meta:relating', None),
('model_meta:relating', None),
('model_meta:relating', None),
),
Relation: (
('model_meta:baseperson_m2m_base', None),
('model_meta:baseperson_m2m_abstract', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:baseperson', None),
('model_meta:person_m2m_inherited', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:person', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
('model_meta:proxyperson', None),
),
},
'get_all_related_many_to_many_with_model': {
BasePerson: (
('friends_abstract_rel_+', None),
('followers_abstract', None),
('friends_base_rel_+', None),
('followers_base', None),
('relating_basepeople', None),
('+', None),
),
Person: (
('friends_abstract_rel_+', BasePerson),
('followers_abstract', BasePerson),
('friends_base_rel_+', BasePerson),
('followers_base', BasePerson),
('relating_basepeople', BasePerson),
('+', BasePerson),
('friends_inherited_rel_+', None),
('followers_concrete', None),
('relating_people', None),
('+', None),
),
Relation: (
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_many_to_many_local': {
BasePerson: [
'friends_abstract_rel_+',
'followers_abstract',
'friends_base_rel_+',
'followers_base',
'relating_basepeople',
'+',
],
Person: [
'friends_inherited_rel_+',
'followers_concrete',
'relating_people',
'+',
],
Relation: [
'm2m_abstract_rel',
'm2m_base_rel',
'm2m_concrete_rel',
],
},
'virtual_fields': {
AbstractPerson: [
'generic_relation_abstract',
'content_object_abstract',
],
BasePerson: [
'generic_relation_base',
'content_object_base',
'generic_relation_abstract',
'content_object_abstract',
],
Person: [
'content_object_concrete',
'generic_relation_concrete',
'generic_relation_base',
'content_object_base',
'generic_relation_abstract',
'content_object_abstract',
],
},
}
class OptionsBaseTests(test.TestCase):
def _map_rq_names(self, res):
return tuple([(o.field.related_query_name(), m) for o, m in res])
def _map_names(self, res):
return tuple([(f.name, m) for f, m in res])
class DataTests(OptionsBaseTests):
def test_fields(self):
for model, expected_result in TEST_RESULTS['fields'].items():
fields = model._meta.fields
self.assertEqual([f.attname for f in fields], expected_result)
def test_local_fields(self):
        def is_data_field(f):
            return isinstance(f, Field) and not isinstance(f, related.ManyToManyField)
for model, expected_result in TEST_RESULTS['local_fields'].items():
fields = model._meta.local_fields
self.assertEqual([f.attname for f in fields], expected_result)
self.assertTrue(all([f.model is model for f in fields]))
self.assertTrue(all([is_data_field(f) for f in fields]))
def test_local_concrete_fields(self):
for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
fields = model._meta.local_concrete_fields
self.assertEqual([f.attname for f in fields], expected_result)
self.assertTrue(all([f.column is not None for f in fields]))
class M2MTests(OptionsBaseTests):
def test_many_to_many(self):
for model, expected_result in TEST_RESULTS['many_to_many'].items():
fields = model._meta.many_to_many
self.assertEqual([f.attname for f in fields], expected_result)
self.assertTrue(all([isinstance(f.rel, related.ManyToManyRel) for f in fields]))
def test_many_to_many_with_model(self):
for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
models = [model for field, model in model._meta.get_m2m_with_model()]
self.assertEqual(models, expected_result)
class RelatedObjectsTests(OptionsBaseTests):
def setUp(self):
self.key_name = lambda r: r[0]
def test_related_objects(self):
result_key = 'get_all_related_objects_with_model'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model()
self.assertEqual(self._map_rq_names(objects), expected)
def test_related_objects_local(self):
result_key = 'get_all_related_objects_with_model_local'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(local_only=True)
self.assertEqual(self._map_rq_names(objects), expected)
def test_related_objects_include_hidden(self):
result_key = 'get_all_related_objects_with_model_hidden'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_include_hidden_local_only(self):
result_key = 'get_all_related_objects_with_model_hidden_local'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_hidden=True, local_only=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_proxy(self):
result_key = 'get_all_related_objects_with_model_proxy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True)
self.assertEqual(self._map_rq_names(objects), expected)
def test_related_objects_proxy_hidden(self):
result_key = 'get_all_related_objects_with_model_proxy_hidden'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True, include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
class RelatedM2MTests(OptionsBaseTests):
def test_related_m2m_with_model(self):
result_key = 'get_all_related_many_to_many_with_model'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_m2m_objects_with_model()
self.assertEqual(self._map_rq_names(objects), expected)
def test_related_m2m_local_only(self):
result_key = 'get_all_related_many_to_many_local'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_many_to_many_objects(local_only=True)
self.assertEqual([o.field.related_query_name() for o in objects], expected)
def test_related_m2m_asymmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('following_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertTrue('followers_base' in [o.field.related_query_name() for o in related_m2m])
def test_related_m2m_symmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('friends_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m])
class VirtualFieldsTests(OptionsBaseTests):
def test_virtual_fields(self):
for model, expected_names in TEST_RESULTS['virtual_fields'].items():
objects = model._meta.virtual_fields
self.assertEqual(sorted([f.name for f in objects]), sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):
def test_get_data_field(self):
field_info = Person._meta.get_field_by_name('data_abstract')
self.assertEqual(field_info[1:], (BasePerson, True, False))
self.assertIsInstance(field_info[0], CharField)
def test_get_m2m_field(self):
field_info = Person._meta.get_field_by_name('m2m_base')
self.assertEqual(field_info[1:], (BasePerson, True, True))
self.assertIsInstance(field_info[0], related.ManyToManyField)
def test_get_related_object(self):
field_info = Person._meta.get_field_by_name('relating_baseperson')
self.assertEqual(field_info[1:], (BasePerson, False, False))
self.assertIsInstance(field_info[0], related.RelatedObject)
def test_get_related_m2m(self):
field_info = Person._meta.get_field_by_name('relating_people')
self.assertEqual(field_info[1:], (None, False, True))
self.assertIsInstance(field_info[0], related.RelatedObject)
def test_get_generic_foreign_key(self):
# For historic reasons generic foreign keys aren't available.
with self.assertRaises(FieldDoesNotExist):
Person._meta.get_field_by_name('content_object_base')
def test_get_generic_relation(self):
field_info = Person._meta.get_field_by_name('generic_relation_base')
self.assertEqual(field_info[1:], (None, True, False))
self.assertIsInstance(field_info[0], GenericRelation)
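# For reference, the tuple shape the tests above rely on (a sketch of the
# historical API: get_field_by_name() returns a 4-tuple, which the tests
# slice with field_info[1:]):
#
#     field, model, direct, m2m = Person._meta.get_field_by_name('data_abstract')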
# --- /1008.ConstructBinarySearchTreefromPreorderTraversal.py (repo: aucan/LeetCode-problems) ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 11:04:36 2020
@author: nenad
"""
"""
Problem URL: https://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal/
Problem description:
Return the root node of a binary search tree that matches the given preorder traversal.
(Recall that a binary search tree is a binary tree where for every node, any descendant of node.left has a value < node.val, and any descendant of node.right has a value > node.val. Also recall that a preorder traversal displays the value of the node first, then traverses node.left, then traverses node.right.)
Example 1:
Input: [8,5,1,7,10,12]
Output: [8,5,10,1,7,null,12]
Note:
1 <= preorder.length <= 100
The values of preorder are distinct.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Time: O(n), space: O(n)
class Solution:
index = 0
def bstFromPreorder(self, preorder) -> TreeNode:
n = len(preorder)
def reconstructBST(minimum, maximum):
# all elements from preorder are used
if Solution.index >= n:
return
root = None
# take next element from preorder array
nodeValue = preorder[Solution.index]
# node belongs to the current subtree
if minimum < nodeValue < maximum:
# create new node
root = TreeNode(nodeValue)
# go to next index
Solution.index += 1
if Solution.index < n:
# reconstruct left and right subtree
# maximum value in the left subtree will be value of current node (all values in left subtree are smaller)
# minimum value in the right subtree will be value of current node (all values in right subtree are greater)
root.left = reconstructBST(minimum, nodeValue)
root.right = reconstructBST(nodeValue, maximum)
return root
# initial bounds are - -oo and +oo
root = reconstructBST(float("-inf"), float("inf"))
# reset index - for test cases sake
Solution.index = 0
return root
def preorder(root):
if root is None:
return
print(root.val, end=" ")
preorder(root.left)
preorder(root.right)
sol = Solution()
# Test 1
root = sol.bstFromPreorder([8,5,1,7,10,12])
preorder(root)
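# The preorder walk of the rebuilt tree reproduces the input order:
# 8 5 1 7 10 12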
# --- /huaweicloud-sdk-eg/huaweicloudsdkeg/v1/model/update_channel_request.py (repo: huaweicloud/huaweicloud-sdk-python-v3, Apache-2.0) ---
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateChannelRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'channel_id': 'str',
'body': 'ChannelUpdateReq'
}
attribute_map = {
'channel_id': 'channel_id',
'body': 'body'
}
def __init__(self, channel_id=None, body=None):
"""UpdateChannelRequest
The model defined in huaweicloud sdk
        :param channel_id: ID of the event channel to query
:type channel_id: str
:param body: Body of the UpdateChannelRequest
:type body: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
"""
self._channel_id = None
self._body = None
self.discriminator = None
self.channel_id = channel_id
if body is not None:
self.body = body
@property
def channel_id(self):
"""Gets the channel_id of this UpdateChannelRequest.
        ID of the event channel to query
:return: The channel_id of this UpdateChannelRequest.
:rtype: str
"""
return self._channel_id
@channel_id.setter
def channel_id(self, channel_id):
"""Sets the channel_id of this UpdateChannelRequest.
        ID of the event channel to query
:param channel_id: The channel_id of this UpdateChannelRequest.
:type channel_id: str
"""
self._channel_id = channel_id
@property
def body(self):
"""Gets the body of this UpdateChannelRequest.
:return: The body of this UpdateChannelRequest.
:rtype: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateChannelRequest.
:param body: The body of this UpdateChannelRequest.
:type body: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateChannelRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
8a2f0e7ff7c6835148031531256843f97c099281 | 1cc344683f0cb166f17bcd74bca5656124597a7f | /ideal_college/staff/admin.py | 6b4e21ea97f7a2d5fcef2e97c1c2796f58a2ebec | [] | no_license | technomicssolutions/ideal_college | f15974b8d048b40c8f27212840599fc28bf02d76 | 712bd89ee8428cd5ae4473ccab417f2755fc4ecc | refs/heads/master | 2020-06-05T00:15:13.337423 | 2015-04-28T06:52:09 | 2015-04-28T06:52:09 | 23,145,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from staff.models import *
admin.site.register(Designation)
admin.site.register(Staff) | [
"[email protected]"
] | |
eb146857efd243b4b309e66308a85ae8576f3360 | 71324aca11e16d6da17b0440e72d0107f5af6e04 | /ptt_blog/blog/models.py | aa4bd122f7bcb84f921c94e8680a92d716cb437f | [
"MIT"
] | permissive | n3k0fi5t/Django_Tutorial | 6bad82a919d1de0162b34f4c7f753cd126b05cc3 | e3953335ca88fe22c68268fd76afb7c4f9bbb55f | refs/heads/master | 2023-02-16T07:56:56.416031 | 2021-01-11T23:17:33 | 2021-01-11T23:17:33 | 291,436,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | from django.db import models
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=50)
content = models.TextField()
date = models.DateTimeField(auto_now_add=True)
class Meta:
        ordering = ['-date', ]
class Push(models.Model):
pusher = models.CharField(max_length=50)
content = models.TextField()
post = models.ForeignKey(Post, on_delete=models.CASCADE)
class PostImage(models.Model):
url = models.TextField(null=False)
post = models.ManyToManyField(Post)
| [
"[email protected]"
] | |
55f4af44562a8fc8d1b2bddb29a09dc31b69a781 | 4913fb7fd32c3dd0da53af7a012569ec2254b35a | /86.继承.py | 2b60c2224b77d2ecd852301be1d17e7343045f9e | [] | no_license | puhaoran12/python_note | 8a21954050ba3126f2ef6d5d1e4a2904df954b9b | b807e7b7dd90c87cee606f50421400c8f3d0ba03 | refs/heads/master | 2023-07-07T20:20:04.546541 | 2021-08-21T02:17:12 | 2021-08-21T02:17:12 | 398,439,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # Inheritance
# Syntax:
# class SubclassName(Parent1, Parent2, ...):
#     pass
# A class that does not explicitly inherit from anything inherits from object by default.
# When defining a subclass, its constructor must call the parent class's constructor.
class Person(object):
def __init__(self,name,age):
self.name=name
self.age=age
def info(self):
        print('Name: {0}, Age: {1}'.format(self.name,self.age))
class Student(Person):
def __init__(self,name,age,score):
super().__init__(name,age)
self.score=score
class Teacher(Person):
def __init__(self,name,age,teacheryear):
super().__init__(name,age)
self.teacheryear=teacheryear
stu=Student('张三',20,100)
stu.info()
print(stu.score)
tea=Teacher('李四',50,28)
tea.info()
print(tea.teacheryear) | [
"[email protected]"
] | |
5fd4bd2fcda784801ee0548c57a0339b09041fa4 | 76f1331d083d360fb3822312537e72d4ff9d50b5 | /keywords_extraction/multilanguage/util/file_utils.py | 654041f49873d8e758e9dc718089da91ad352125 | [] | no_license | ZouJoshua/ml_project | 2fe0efee49aa1454b04cd83c61455232601720a6 | b1d8eb050182cd782bc6f3bb3ac1429fe22ab7b7 | refs/heads/master | 2021-07-22T10:37:56.452484 | 2020-05-09T09:54:39 | 2020-05-09T09:54:39 | 158,562,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 12/5/19 1:47 PM
@File : file_utils.py
@Desc :
"""
from keywords_extraction.multilanguage.core.language import Language
def writeLines(filePath, lines):
    with open(filePath, 'w', encoding='utf-8') as writer:
        for line in lines:
            writer.write(line + '\n')
def readLines(filePath):
    # A context manager guarantees the handle is closed; the original left it open.
    with open(filePath, 'r', encoding='utf-8') as reader:
        return [line.strip() for line in reader.readlines()]
def readLanguages(filePath):
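    # Each line is expected to hold a tab-separated "code<TAB>name" pair
    # (inferred from the split below).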
return [Language(kv[0], kv[1]) for kv in [line.split('\t') for line in readLines(filePath)]]
def readStopwords(filePath):
return set(readLines(filePath)) | [
"[email protected]"
] | |
437324b82fcec902626d1fcec0186f7e95d6bed7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02270/s773469916.py | da9d345dfc9c9101bb74b1f5e3021c1dd24ec32e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | import math
n, k = map(int, input().split())
num = [int(input()) for i in range(n)]
#num.sort()
left = max(max(num), math.ceil(sum(num)/k))
right = sum(num)
while left < right:
    mid = (left + right) // 2
    track = 1
    cnt = 0
    flag = 0
    for i in num:
        cnt += i
        if cnt > mid:
            # The current truck is full at capacity mid; load onto the next one.
            track += 1
            cnt = i
        if track > k:
            # More than k trucks would be needed, so mid is too small.
            flag = 1
            break
    if flag == 0:
        right = mid
    else:
        left = mid + 1
print(left)
| [
"[email protected]"
] | |
343da047233e132fb28a1787d96c19fa6b312cc8 | 87b006149b16a3028385fc58cf781f5a12c94ad9 | /PyFunceble/query/http_status_code.py | cd4836d29c4b0b24c22337f13be5a6a56335e063 | [
"Apache-2.0"
] | permissive | spirillen/PyFunceble | 04d03b2678ad46ec81c520a32df5397832414451 | 3c8f62062bffa0e16d465c150a853af8bf2f2205 | refs/heads/master | 2023-05-12T04:32:04.587521 | 2022-11-20T11:19:06 | 2022-11-20T11:19:06 | 237,827,167 | 2 | 0 | Apache-2.0 | 2021-01-27T10:09:59 | 2020-02-02T19:50:47 | Python | UTF-8 | Python | false | false | 11,767 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides our interface for getting the status code of a given subject.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/latest/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2022 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import socket
from typing import Optional, Union
import PyFunceble.facility
import PyFunceble.factory
import PyFunceble.storage
from PyFunceble.converter.url2netloc import Url2Netloc
class HTTPStatusCode:
"""
    Provides an interface for the extraction of the HTTP status code.
"""
STD_UNKNOWN_STATUS_CODE: int = 99999999
STD_TIMEOUT: float = 5.0
STD_VERIFY_CERTIFICATE: bool = True
STD_ALLOW_REDIRECTS: bool = False
_subject: Optional[str] = None
_timeout: float = 5.0
_verify_certificate: bool = True
_allow_redirects: bool = False
_url2netloc: Optional[Url2Netloc] = None
def __init__(
self,
subject: Optional[str] = None,
*,
timeout: Optional[float] = None,
verify_certificate: Optional[bool] = None,
allow_redirects: Optional[bool] = None,
) -> None:
if subject is not None:
self.subject = subject
if timeout is not None:
self.timeout = timeout
else:
self.guess_and_set_timeout()
if verify_certificate is not None:
self.verify_certificate = verify_certificate
else:
self.guess_and_set_verify_certificate()
if allow_redirects is not None:
self.allow_redirects = allow_redirects
else:
self.allow_redirects = self.STD_ALLOW_REDIRECTS
self._url2netloc = Url2Netloc()
def ensure_subject_is_given(func): # pylint: disable=no-self-argument
"""
Ensures that the subject is given before running the decorated method.
:raise TypeError:
If the subject is not a string.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if not isinstance(self.subject, str):
raise TypeError(
f"<self.subject> should be {str}, {type(self.subject)} given."
)
return func(self, *args, **kwargs) # pylint: disable=not-callable
return wrapper
@property
def subject(self) -> Optional[str]:
"""
Provides the current state of the :code:`_subject` attribute.
"""
return self._subject
@subject.setter
def subject(self, value: str) -> None:
"""
Sets the subject to work with.
:param value:
The subject to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`str`.
:raise ValueError:
When the given :code:`value` is empty.
"""
if not isinstance(value, str):
raise TypeError(f"<value> should be {str}, {type(value)} given.")
if not value:
raise ValueError("<value> should not be empty.")
self._subject = value
def set_subject(self, value: str) -> "HTTPStatusCode":
"""
Sets the subject to work with.
:param value:
The subject to set.
"""
self.subject = value
return self
@property
def timeout(self) -> float:
"""
Provides the current state of the :code:`_timeout` attribute.
"""
return self._timeout
@timeout.setter
def timeout(self, value: Union[float, int]) -> None:
"""
Sets the timeout to apply.
:param value:
The timeout to apply.
:raise TypeError:
When the given :code:`value` is not a :py:class:`int`
nor :py:class:`float`.
:raise ValueError:
            When the given :code:`value` is less than `0`.
"""
if not isinstance(value, (int, float)):
raise TypeError(f"<value> should be {int} or {float}, {type(value)} given.")
if value < 0:
raise ValueError(f"<value> ({value!r}) shouldn't be less than 0.")
self._timeout = float(value)
def set_timeout(self, value: Union[float, int]) -> "HTTPStatusCode":
"""
Sets the timeout to apply.
:param value:
The timeout to apply.
"""
self.timeout = value
return self
def guess_and_set_timeout(self) -> "HTTPStatusCode":
"""
Tries to guess and set the timeout from the configuration.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.timeout = PyFunceble.storage.CONFIGURATION.lookup.timeout
else:
self.timeout = self.STD_TIMEOUT
return self
@property
def verify_certificate(self) -> bool:
"""
Provides the current state of the :code:`verify_certificate` attribute.
"""
return self._verify_certificate
@verify_certificate.setter
def verify_certificate(self, value: bool) -> None:
"""
Sets the value of the :code:`verify_certificate` variable.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._verify_certificate = value
def set_verify_certificate(self, value: bool) -> "HTTPStatusCode":
"""
Sets the value of the :code:`verify_certificate` variable.
:param value:
The value to set.
"""
self.verify_certificate = value
return self
def guess_and_set_verify_certificate(self) -> "HTTPStatusCode":
"""
Tries to guess and set the :code:`verify_certificate` attribute.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.verify_certificate = bool(
PyFunceble.storage.CONFIGURATION["verify_ssl_certificate"]
)
else:
self.verify_certificate = self.STD_VERIFY_CERTIFICATE
return self
@property
def allow_redirects(self) -> bool:
"""
Provides the current state of the :code:`_allow_redirects` attribute.
"""
return self._allow_redirects
@allow_redirects.setter
def allow_redirects(self, value: bool) -> None:
"""
Sets the value of the :code:`verify_certificate` variable.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._allow_redirects = value
def set_allow_redirects(self, value: bool) -> "HTTPStatusCode":
"""
Sets the value of the :code:`verify_certificate` variable.
:param value:
The value to set.
"""
self.allow_redirects = value
return self
@ensure_subject_is_given
def get_status_code(self) -> int:
"""
Provides the status code.
.. note::
            The HTTP status code provided will differ depending on the
            following conditions.
            Assuming that :code:`allow_redirects` is set to :py:class:`False`,
            you will be provided the following:
            - :code:`http://example.org (302) -> https://example.org (200) ===> 200`
            - :code:`http://example.org (302) -> https://test.example.org (200) ===> 302`
            - :code:`http://example.org (302) -> https://test.example.org (301) -> https://example.org (200) ===> 302`
            On the other side, if the :code:`allow_redirects` property is set
            to :py:class:`True`, this method will provide the status code of
            the last response in the redirection order.
            In case of any error, this method will provide the default
            (:code:`STD_UNKNOWN_STATUS_CODE`) value.
""" # pylint: disable=line-too-long
try:
req = PyFunceble.factory.Requester.get(
self.subject,
timeout=self.timeout,
verify=self.verify_certificate,
allow_redirects=True,
)
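            # Added note: the netloc (origin) of the requested URL is compared
            # with the netloc of the first redirect target to decide whether
            # the redirect stayed on the same host.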
first_origin = self._url2netloc.set_data_to_convert(
self.subject
).get_converted()
if len(req.history) > 1:
final_origin = self._url2netloc.set_data_to_convert(
req.history[1].url
).get_converted()
else:
final_origin = self._url2netloc.set_data_to_convert(
req.url
).get_converted()
if (
not self.allow_redirects
and first_origin != final_origin
and req.history
):
return req.history[0].status_code
return req.status_code
except (
PyFunceble.factory.Requester.exceptions.RequestException,
PyFunceble.factory.Requester.exceptions.InvalidSchema,
PyFunceble.factory.Requester.exceptions.InvalidURL,
PyFunceble.factory.Requester.exceptions.MissingSchema,
socket.timeout,
PyFunceble.factory.Requester.urllib3_exceptions.InvalidHeader,
):
pass
return self.STD_UNKNOWN_STATUS_CODE
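# Minimal usage sketch (added; assumes PyFunceble's requester factory was
# initialized elsewhere, as the surrounding project normally does):
#
#   checker = HTTPStatusCode(subject="https://example.org", allow_redirects=False)
#   status = checker.get_status_code()
#   if status == HTTPStatusCode.STD_UNKNOWN_STATUS_CODE:
#       ...  # the request failed and the sentinel default was returned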
| [
"[email protected]"
] | |
b6c3622f32e6ca37c49c9b38c97cdb597eb83ca8 | d9c0a55dfc3a87e4166f3ee73d60d302730278d1 | /board/urls.py | 2215b2f3120742acd4a673f42981a768205a51a5 | [] | no_license | mzazakeith/codeboard | 358872794b1b2c35886bde8c26631e7d4877883c | f73b9a303d6ab5984c4ce73684e9ed34ceff4fe4 | refs/heads/development | 2020-03-25T15:59:23.795795 | 2018-08-10T07:20:36 | 2018-08-10T07:20:36 | 143,910,011 | 0 | 0 | null | 2018-08-10T07:20:37 | 2018-08-07T18:07:48 | Python | UTF-8 | Python | false | false | 625 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^home$', views.home, name='home'),
url(r'^new-service$', views.new_service, name='new-service'),
url(r'^userprofile/(?P<user_id>\d+)', views.userprofile, name='profile'),
url(r'^forum$', views.forum, name='forum'),
url(r'^comment/(?P<topic_id>\d+)', views.comment, name='comment'),
url(r'^read/(?P<msg_id>\d+)', views.read, name='read'),
url(r'^rate/(?P<user_id>\d+)', views.rate, name='rate'),
url(r'^all-services/', views.get_services, name='all-services')
]
| [
"[email protected]"
] | |
5d43aaf753ce4267514d56d9a4f3b9e3b905f9c2 | 3d989666e6ceb2abc9175dcf7b1d0c1f8c76d205 | /py_solution/p119_yanghui_ii.py | 610dda33ec40d7516cd4cee381295e6206749b63 | [] | no_license | dengshilong/leetcode | 00ae0898b4645efd1de69a13f2fa92606e899297 | 5ab258f04771db37a3beb3cb0c490a06183f7b51 | refs/heads/master | 2021-01-10T11:58:10.396399 | 2020-04-10T12:10:54 | 2020-04-10T12:10:54 | 47,912,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | class Solution(object):
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
res = [0] * (rowIndex + 1)
res[0] = 1
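        # Update right-to-left so res[j - 1] still holds the previous row's
        # value when res[j] is computed, keeping space at O(rowIndex).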
        for i in range(1, rowIndex + 1):
            for j in range(i, 0, -1):
                if j == i:
                    res[j] = 1
                else:
                    res[j] = res[j] + res[j - 1]
return res
if __name__ == "__main__":
solution = Solution()
assert solution.getRow(3) == [1, 3, 3, 1]
| [
"[email protected]"
] | |
0e26994af468ecff3062e2111115bd78d92a4d34 | 2869eb01810389ab7b64355ec189800e7b1d49b9 | /picoCTF 2021/Cryptography/ddes/ddes.py | d417d6774654db27ccb697bcb42c0ffd37d83515 | [] | no_license | Giantpizzahead/ctf-archive | 096b1673296510bddef9f284700ebdb0e76d71a7 | 5063cd2889cd300aade440429faf9c4ca68511ef | refs/heads/master | 2021-12-28T14:23:33.598960 | 2021-12-20T17:23:55 | 2021-12-20T17:23:55 | 252,905,854 | 1 | 0 | null | 2021-12-20T17:14:05 | 2020-04-04T04:11:24 | C | UTF-8 | Python | false | false | 1,023 | py | #!/usr/bin/python3 -u
from Crypto.Cipher import DES
import binascii
import itertools
import random
import string
def pad(msg):
block_len = 8
over = len(msg) % block_len
pad = block_len - over
return (msg + " " * pad).encode()
def generate_key():
return pad("".join(random.choice(string.digits) for _ in range(6)))
FLAG = open("flag").read().rstrip()
KEY1 = generate_key()
KEY2 = generate_key()
def get_input():
try:
res = binascii.unhexlify(input("What data would you like to encrypt? ").rstrip()).decode()
except:
res = None
exit(0)
return res
def double_encrypt(m):
msg = pad(m)
cipher1 = DES.new(KEY1, DES.MODE_ECB)
enc_msg = cipher1.encrypt(msg)
cipher2 = DES.new(KEY2, DES.MODE_ECB)
return binascii.hexlify(cipher2.encrypt(enc_msg)).decode()
print("Here is the flag:")
print(double_encrypt(FLAG))
while True:
inputs = get_input()
if inputs:
print(double_encrypt(inputs))
else:
print("Invalid input.")
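# Observation (added note, not part of the challenge handout): keys are six
# characters from string.digits, so only 10**6 keys exist per DES stage. With
# one chosen plaintext P and its oracle output C, a meet-in-the-middle search
# -- compute E_k1(P) for every k1, D_k2(C) for every k2, and match the
# intermediate blocks -- recovers both keys in roughly 2 * 10**6 DES calls.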
| [
"[email protected]"
] | |
b1eb29ee864eccdeb3ab8cfa998a45eb742f03e2 | 44bbfe1c9a7f16e632cdd27c2de058033b33ea6d | /mayan/apps/document_indexing/migrations/0013_auto_20170714_2133.py | 6ccaeb6689a30d2b114e6f833b74cc71243d8372 | [
"Apache-2.0",
"ISC",
"MIT"
] | permissive | lxny2004/open-paperless | 34025c3e8ac7b4236b0d8fc5ca27fc11d50869bc | a8b45f8f0ee5d7a1b9afca5291c6bfaae3db8280 | refs/heads/master | 2020-04-27T04:46:25.992405 | 2019-03-06T03:30:15 | 2019-03-06T03:30:15 | 174,064,366 | 0 | 0 | NOASSERTION | 2019-03-06T03:29:20 | 2019-03-06T03:29:20 | null | UTF-8 | Python | false | false | 480 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-14 21:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('document_indexing', '0012_auto_20170530_0728'),
]
operations = [
migrations.AlterModelOptions(
name='index',
options={'ordering': ('label',), 'verbose_name': 'Index', 'verbose_name_plural': 'Indexes'},
),
]
| [
"[email protected]"
] | |
b62113cd03cc7ab736d4f927336349eb36009d71 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2606/59018/259657.py | cd4fd07059a27026b20e660f67c13043d6659581 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | a1=input()[1:-2].split(',')
a=[int(y) for y in a1]
target=int(input())
print(a.index(target)) | [
"[email protected]"
] | |
4eb91ee482ea775363eefe35c1d564339ab6ae90 | f6688132ec14a9d03c8bb05e85819f810fd3e4e6 | /tfold/object_detection/protos/ssd_pb2.py | 4e8d32ed2521c56c9b2a282a8c96024adf452b5d | [
"Apache-2.0"
] | permissive | mariusionescu/tfold | 44515b9eba027a8d4a9265e6f7299dc08294dc42 | b6a9913d29a62326bfc3086fa14ed317d1e02a0a | refs/heads/master | 2020-04-08T19:59:39.676558 | 2018-12-05T19:47:57 | 2018-12-05T19:47:57 | 159,679,441 | 0 | 0 | Apache-2.0 | 2018-11-29T14:33:13 | 2018-11-29T14:33:12 | null | UTF-8 | Python | false | true | 23,985 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tfold.object_detection/protos/ssd.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tfold.object_detection.protos import anchor_generator_pb2 as object__detection_dot_protos_dot_anchor__generator__pb2
from tfold.object_detection.protos import box_coder_pb2 as object__detection_dot_protos_dot_box__coder__pb2
from tfold.object_detection.protos import box_predictor_pb2 as object__detection_dot_protos_dot_box__predictor__pb2
from tfold.object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
from tfold.object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2
from tfold.object_detection.protos import matcher_pb2 as object__detection_dot_protos_dot_matcher__pb2
from tfold.object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2
from tfold.object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2
from tfold.object_detection.protos import \
region_similarity_calculator_pb2 as object__detection_dot_protos_dot_region__similarity__calculator__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/ssd.proto',
package='object_detection.protos',
syntax='proto2',
serialized_options=None,
serialized_pb=_b(
'\n!object_detection/protos/ssd.proto\x12\x17object_detection.protos\x1a.object_detection/protos/anchor_generator.proto\x1a\'object_detection/protos/box_coder.proto\x1a+object_detection/protos/box_predictor.proto\x1a)object_detection/protos/hyperparams.proto\x1a+object_detection/protos/image_resizer.proto\x1a%object_detection/protos/matcher.proto\x1a$object_detection/protos/losses.proto\x1a-object_detection/protos/post_processing.proto\x1a:object_detection/protos/region_similarity_calculator.proto\"\xa7\x08\n\x03Ssd\x12\x13\n\x0bnum_classes\x18\x01 \x01(\x05\x12<\n\rimage_resizer\x18\x02 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12G\n\x11\x66\x65\x61ture_extractor\x18\x03 \x01(\x0b\x32,.object_detection.protos.SsdFeatureExtractor\x12\x34\n\tbox_coder\x18\x04 \x01(\x0b\x32!.object_detection.protos.BoxCoder\x12\x31\n\x07matcher\x18\x05 \x01(\x0b\x32 .object_detection.protos.Matcher\x12R\n\x15similarity_calculator\x18\x06 \x01(\x0b\x32\x33.object_detection.protos.RegionSimilarityCalculator\x12)\n\x1a\x65ncode_background_as_zeros\x18\x0c \x01(\x08:\x05\x66\x61lse\x12 \n\x15negative_class_weight\x18\r \x01(\x02:\x01\x31\x12<\n\rbox_predictor\x18\x07 \x01(\x0b\x32%.object_detection.protos.BoxPredictor\x12\x42\n\x10\x61nchor_generator\x18\x08 \x01(\x0b\x32(.object_detection.protos.AnchorGenerator\x12@\n\x0fpost_processing\x18\t \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x12+\n\x1dnormalize_loss_by_num_matches\x18\n \x01(\x08:\x04true\x12-\n\x1enormalize_loc_loss_by_codesize\x18\x0e \x01(\x08:\x05\x66\x61lse\x12+\n\x04loss\x18\x0b \x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x1f\n\x10\x66reeze_batchnorm\x18\x10 \x01(\x08:\x05\x66\x61lse\x12\'\n\x18inplace_batchnorm_update\x18\x0f \x01(\x08:\x05\x66\x61lse\x12.\n\x1fweight_regression_loss_by_score\x18\x11 \x01(\x08:\x05\x66\x61lse\x12>\n/use_expected_classification_loss_under_sampling\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x18min_num_negative_samples\x18\x13 \x01(\x02:\x01\x30\x12*\n\x1f\x64\x65sired_negative_sampling_ratio\x18\x14 \x01(\x02:\x01\x33\x12\"\n\x14\x61\x64\x64_background_class\x18\x15 \x01(\x08:\x04true\"\xf6\x02\n\x13SsdFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x1b\n\x10\x64\x65pth_multiplier\x18\x02 \x01(\x02:\x01\x31\x12\x15\n\tmin_depth\x18\x03 \x01(\x05:\x02\x31\x36\x12>\n\x10\x63onv_hyperparams\x18\x04 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12:\n+override_base_feature_extractor_hyperparams\x18\t \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0fpad_to_multiple\x18\x05 \x01(\x05:\x01\x31\x12#\n\x14use_explicit_padding\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x08 \x01(\x08:\x05\x66\x61lse\x12<\n\x03\x66pn\x18\n \x01(\x0b\x32/.object_detection.protos.FeaturePyramidNetworksJ\x04\x08\x06\x10\x07\"i\n\x16\x46\x65\x61turePyramidNetworks\x12\x14\n\tmin_level\x18\x01 \x01(\x05:\x01\x33\x12\x14\n\tmax_level\x18\x02 \x01(\x05:\x01\x37\x12#\n\x16\x61\x64\x64itional_layer_depth\x18\x03 \x01(\x05:\x03\x32\x35\x36')
,
dependencies=[object__detection_dot_protos_dot_anchor__generator__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_box__coder__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_box__predictor__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_matcher__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_region__similarity__calculator__pb2.DESCRIPTOR, ])
_SSD = _descriptor.Descriptor(
name='Ssd',
full_name='object_detection.protos.Ssd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_classes', full_name='object_detection.protos.Ssd.num_classes', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_resizer', full_name='object_detection.protos.Ssd.image_resizer', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature_extractor', full_name='object_detection.protos.Ssd.feature_extractor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_coder', full_name='object_detection.protos.Ssd.box_coder', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matcher', full_name='object_detection.protos.Ssd.matcher', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='similarity_calculator', full_name='object_detection.protos.Ssd.similarity_calculator', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encode_background_as_zeros', full_name='object_detection.protos.Ssd.encode_background_as_zeros',
index=6,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='negative_class_weight', full_name='object_detection.protos.Ssd.negative_class_weight', index=7,
number=13, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_predictor', full_name='object_detection.protos.Ssd.box_predictor', index=8,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anchor_generator', full_name='object_detection.protos.Ssd.anchor_generator', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='post_processing', full_name='object_detection.protos.Ssd.post_processing', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalize_loss_by_num_matches', full_name='object_detection.protos.Ssd.normalize_loss_by_num_matches',
index=11,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalize_loc_loss_by_codesize',
full_name='object_detection.protos.Ssd.normalize_loc_loss_by_codesize', index=12,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss', full_name='object_detection.protos.Ssd.loss', index=13,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_batchnorm', full_name='object_detection.protos.Ssd.freeze_batchnorm', index=14,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inplace_batchnorm_update', full_name='object_detection.protos.Ssd.inplace_batchnorm_update', index=15,
number=15, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_regression_loss_by_score',
full_name='object_detection.protos.Ssd.weight_regression_loss_by_score', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_expected_classification_loss_under_sampling',
full_name='object_detection.protos.Ssd.use_expected_classification_loss_under_sampling', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_num_negative_samples', full_name='object_detection.protos.Ssd.min_num_negative_samples', index=18,
number=19, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_negative_sampling_ratio',
full_name='object_detection.protos.Ssd.desired_negative_sampling_ratio', index=19,
number=20, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(3),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='add_background_class', full_name='object_detection.protos.Ssd.add_background_class', index=20,
number=21, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=469,
serialized_end=1532,
)
_SSDFEATUREEXTRACTOR = _descriptor.Descriptor(
name='SsdFeatureExtractor',
full_name='object_detection.protos.SsdFeatureExtractor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='object_detection.protos.SsdFeatureExtractor.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='depth_multiplier', full_name='object_detection.protos.SsdFeatureExtractor.depth_multiplier', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.SsdFeatureExtractor.min_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=16,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.SsdFeatureExtractor.conv_hyperparams', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='override_base_feature_extractor_hyperparams',
full_name='object_detection.protos.SsdFeatureExtractor.override_base_feature_extractor_hyperparams',
index=4,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_to_multiple', full_name='object_detection.protos.SsdFeatureExtractor.pad_to_multiple', index=5,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_explicit_padding', full_name='object_detection.protos.SsdFeatureExtractor.use_explicit_padding',
index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.SsdFeatureExtractor.use_depthwise', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fpn', full_name='object_detection.protos.SsdFeatureExtractor.fpn', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1535,
serialized_end=1909,
)
_FEATUREPYRAMIDNETWORKS = _descriptor.Descriptor(
name='FeaturePyramidNetworks',
full_name='object_detection.protos.FeaturePyramidNetworks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_level', full_name='object_detection.protos.FeaturePyramidNetworks.min_level', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_level', full_name='object_detection.protos.FeaturePyramidNetworks.max_level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=7,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='additional_layer_depth',
full_name='object_detection.protos.FeaturePyramidNetworks.additional_layer_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1911,
serialized_end=2016,
)
_SSD.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER
_SSD.fields_by_name['feature_extractor'].message_type = _SSDFEATUREEXTRACTOR
_SSD.fields_by_name['box_coder'].message_type = object__detection_dot_protos_dot_box__coder__pb2._BOXCODER
_SSD.fields_by_name['matcher'].message_type = object__detection_dot_protos_dot_matcher__pb2._MATCHER
_SSD.fields_by_name[
'similarity_calculator'].message_type = object__detection_dot_protos_dot_region__similarity__calculator__pb2._REGIONSIMILARITYCALCULATOR
_SSD.fields_by_name['box_predictor'].message_type = object__detection_dot_protos_dot_box__predictor__pb2._BOXPREDICTOR
_SSD.fields_by_name[
'anchor_generator'].message_type = object__detection_dot_protos_dot_anchor__generator__pb2._ANCHORGENERATOR
_SSD.fields_by_name[
'post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING
_SSD.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS
_SSDFEATUREEXTRACTOR.fields_by_name[
'conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_SSDFEATUREEXTRACTOR.fields_by_name['fpn'].message_type = _FEATUREPYRAMIDNETWORKS
DESCRIPTOR.message_types_by_name['Ssd'] = _SSD
DESCRIPTOR.message_types_by_name['SsdFeatureExtractor'] = _SSDFEATUREEXTRACTOR
DESCRIPTOR.message_types_by_name['FeaturePyramidNetworks'] = _FEATUREPYRAMIDNETWORKS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Ssd = _reflection.GeneratedProtocolMessageType('Ssd', (_message.Message,), dict(
DESCRIPTOR=_SSD,
__module__='object_detection.protos.ssd_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.Ssd)
))
_sym_db.RegisterMessage(Ssd)
SsdFeatureExtractor = _reflection.GeneratedProtocolMessageType('SsdFeatureExtractor', (_message.Message,), dict(
DESCRIPTOR=_SSDFEATUREEXTRACTOR,
__module__='object_detection.protos.ssd_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.SsdFeatureExtractor)
))
_sym_db.RegisterMessage(SsdFeatureExtractor)
FeaturePyramidNetworks = _reflection.GeneratedProtocolMessageType('FeaturePyramidNetworks', (_message.Message,), dict(
DESCRIPTOR=_FEATUREPYRAMIDNETWORKS,
__module__='object_detection.protos.ssd_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.FeaturePyramidNetworks)
))
_sym_db.RegisterMessage(FeaturePyramidNetworks)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
4bec73da24b04e8ce1af04ca95cda3cfc8369b6e | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/uline/uline/uline/handlers/api/risk/urls.py | 1cf460fe48872c54f9c6f0ee70bdc2f31883a7c4 | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import URLSpec as url
from uline.handlers.api.risk.alipay_risk import AlipayTransactionRiskNotify
# Prefix: /api/fee/
urls = [
url(r'/alipay', AlipayTransactionRiskNotify)
]
| [
"[email protected]"
] | |
50b551d144ecadac213a7c075f1701bc50f5ea45 | e64b6966665a0964e382953a96df1ebe1a41cf10 | /0001-0100/0096-Unique Binary Search Trees/0096-Unique Binary Search Trees.py | 277969006a390820311ba754da3b175119ed218e | [
"MIT"
] | permissive | deepbas/LeetCode | 8dfbb2b0b88b32c01033e6eabd8a3641c9a57083 | a93f907f03cb3861e6858370f57129e01563fe5a | refs/heads/master | 2020-08-01T14:14:38.649057 | 2019-09-09T01:51:42 | 2019-09-09T01:51:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | class Solution:
def numTrees(self, n: int) -> int:
dp = [0] * (n + 1)
dp[0] = 1
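        # dp[j] counts BSTs on j nodes: each choice of root leaves an i-node
        # left subtree and a (j - i - 1)-node right subtree, so
        # dp[j] = sum(dp[i] * dp[j - i - 1]) -- the Catalan number recurrence.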
for j in range(1, n + 1):
for i in range(j):
dp[j] += dp[i] * dp[j - i - 1]
return dp[n]
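# Minimal check (added illustration): n = 3 gives 5 distinct BSTs,
# the third Catalan number.
if __name__ == "__main__":
    assert Solution().numTrees(3) == 5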
| [
"[email protected]"
] | |
412857054cb500dfa2b27027186cf74f4f227492 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20180525/example_structlog/fixedlog/__init__.py | f62cd854d6508bd55d2fb0d39b93de3b120d5ff5 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 340 | py | import structlog
def get_logger(name, *args, **kwargs):
logger = structlog.get_logger(name, source=name)
return logger
DEFAULT_PROCESSORS = [
structlog.processors.JSONRenderer(),
]
def setup(*args, **kwargs):
kwargs["processors"] = kwargs.pop("processors", DEFAULT_PROCESSORS)
structlog.configure(*args, **kwargs)
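# Usage sketch ("myapp" and the event fields are illustrative assumptions):
#
#   setup()
#   log = get_logger("myapp")
#   log.info("started", port=8080)
#   # -> roughly {"port": 8080, "source": "myapp", "event": "started"}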
| [
"[email protected]"
] | |
bb02b542b17a538bf4ce6df589bf32f9432df22d | 010c5fbc97731286be00028ff33fc981d943bca3 | /primal/src/code/impute/impute/dev/plot_segments.py | 230cd92d5c372b17af8ef68bb5ce21fcfd9e04d8 | [] | no_license | orenlivne/ober | 6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30 | 810b16b2611f32c191182042240851152784edea | refs/heads/master | 2021-01-23T13:48:49.172653 | 2014-04-03T13:57:44 | 2014-04-03T13:57:44 | 6,902,212 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | '''
Created on Jan 31, 2013
@author: oren
'''
import sys, matplotlib.pylab as P, numpy as np
if __name__ == '__main__':
s = np.loadtxt(sys.argv[1])
P.figure(1)
P.clf()
P.hist((s[:, 3] - s[:, 2]) / 1e6, 50)
P.xlabel('Length [Mbp]')
P.ylabel('Frequency')
P.title('IBD Segment Length Distribution in the Hutterites')
| [
"[email protected]"
] | |
2c797dafb38c53aaf146fd45cba67212fe584319 | dacb2bba2c91877c5157ccb8ab34e112abfea0ee | /projects/project_12/src/navigation/scripts/waypoint_logger.py | 15828bacbbada3e4a83c3efa3875b7e5f4697bdd | [] | no_license | amuamushu/projects-2020-2021 | 7fd4e29a8f51406ded59a97cd878a5752ffc700b | f1c385e46d2d5475b28dec91b57a933ac81c23c5 | refs/heads/main | 2023-04-01T11:47:55.935278 | 2021-03-28T00:37:29 | 2021-03-28T00:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | #!/usr/bin/env python
import pandas as pd
import rospy
from nav_msgs.msg import Odometry
class WaypointLogger:
def __init__(self):
self.output_name = rospy.get_param("~wp_log_output", "saved_waypoints.csv")
self.odom_topic = rospy.get_param("~odom_topic", "/vehicle/odom")
# save waypoints from odometry in csv
self.wps = []
self.current_count = 0
self.logging_time = 100
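        # Every `logging_time` odometry messages the full waypoint list is
        # rewritten to CSV, so a crash loses at most the most recent batch.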
rospy.Subscriber(self.odom_topic, Odometry, self.odom_callback)
rospy.loginfo("Started Odometry")
def odom_callback(self, data):
waypoint = (data.pose.pose.position.x,
data.pose.pose.position.y,
data.pose.pose.position.z,
data.pose.pose.orientation.x,
data.pose.pose.orientation.y,
data.pose.pose.orientation.z,
data.pose.pose.orientation.w)
self.wps.append(waypoint)
# Save waypoint logs every self.logging_time iterations
if self.current_count % self.logging_time == 0:
rospy.logerr(f"saving odometry logs to {self.output_name}")
self.df_logs = pd.DataFrame(self.wps, columns=["x", "y", "z", "qx", "qy", "qz", "qw"])
self.df_logs.to_csv(self.output_name, index=False)
self.current_count += 1
if __name__ == '__main__':
try:
rospy.init_node('waypoint_logger', anonymous=True)
WaypointLogger()
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
] | |
2d38b298243ab9608a59b9b5c60b83576b6a368b | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200414/codes/output/code111.py | e947c90eb401915b8fe4acb6b8e2111b71fd0060 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 410 | py | import pygal
box_plot = pygal.Box(box_mode="pstdev")
box_plot.title = 'V8 benchmark results'
box_plot.add('Chrome', [6395, 8212, 7520, 7218, 12464, 1660, 2123, 8607])
box_plot.add('Firefox', [7473, 8099, 11700, 2651, 6361, 1044, 3797, 9450])
box_plot.add('Opera', [3472, 2933, 4203, 5229, 5810, 1828, 9013, 4669])
box_plot.add('IE', [43, 41, 59, 79, 144, 136, 34, 102])
print(box_plot.render(is_unicode=True))
| [
"[email protected]"
] | |
afb9fc8593fbbc1f50b77535b5f02b2d020e9683 | fce8a56e09739bad6b0953fc3d890292bb2b7f31 | /RedditScraperService/FileWriting.py | 30dcc3aaadff30e3bffd6fc5040aac23ee460864 | [] | no_license | whorst/WsbInvesting | d69f84af2e9bd81560ee8b88d7e882f022350aae | e48ea1a2f3434ab3084746ecce4012c656800280 | refs/heads/master | 2022-11-26T00:23:24.887989 | 2020-08-06T03:04:16 | 2020-08-06T03:04:16 | 284,287,474 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | def writeRidiculouslyHighOrLowToFile(comment):
    with open("resources/files/ridiculouslyHighOrLowReason", "a") as outFile:
        outFile.write(comment)
        outFile.write("\n\n")
def writeClosePositionFailureToFile(msg):
    with open("resources/files/closePositionFailure", "a") as outFile:
        outFile.write(msg + "\n\n")
def writeValidPositionsToFile(comment, newPosition):
    with open("resources/files/commentFileOut", "a") as outFile:
        outFile.write("\n\n")
        outFile.write(comment)
        outFile.write("\n")
        outFile.write(str(newPosition))
        outFile.write("\n\n") | [
"[email protected]"
] | |
8b9a6b1fa210fa7d31ae50de5904a35240c8326d | 7c8da5b1fa05c6d6799aa3d0aef0fadb215f04db | /example/vis.py | 97ff149c6340e0ad47c5b8a30f5d81210c343df5 | [] | no_license | jackd/tiny_imagenet | a71a7b2a3b328dd6e2a06ec37cf4da365b837d32 | 1669940c8ec6ecc6d38a7094b4a00a7182020603 | refs/heads/master | 2020-04-12T08:29:34.255095 | 2018-12-19T05:12:05 | 2018-12-19T05:12:05 | 162,386,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | #!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from tiny_imagenet.manager import TinyImagenetManager, N_VAL_EXAMPLES
man = TinyImagenetManager()
print(man.extract_dir)
ids = man.load_wordnet_ids()
indices = {wnid: i for i, wnid in enumerate(ids)}
val_ids, bboxes = man.load_val_annotations()
wnids = man.load_wordnet_ids()
words = man.load_words()
class_words = [words[wnid] for wnid in wnids]
val_indices = [indices[i] for i in val_ids]
for i in range(N_VAL_EXAMPLES):
image = Image.open(man.val_image_path(i))
class_index = val_indices[i]
plt.imshow(np.array(image))
plt.title('%d: %s' % (class_index, class_words[class_index]))
plt.show()
| [
"[email protected]"
] | |
4315e3bdde66e49919a7e3977603a8efd6504cd7 | 7f7e5e86dde3dbae74d5bc059d31d26184553287 | /docs/conf.py | d449e6c2811de5ea921cceb2e1dd288fe4382eb5 | [
"MIT"
] | permissive | CRFS/python3-ncplib | 9ada935dcecbc5d99ab03a8e90e16c1f1178f136 | 1f3d527c71749e63a28f772b7b8d9442281889be | refs/heads/master | 2023-05-11T09:46:25.292060 | 2023-04-30T11:34:23 | 2023-04-30T11:34:23 | 37,270,803 | 4 | 2 | MIT | 2023-04-30T11:34:24 | 2015-06-11T15:51:08 | Python | UTF-8 | Python | false | false | 9,604 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ncplib documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 2 08:41:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import pkg_resources
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ncplib'
copyright = '2016, CRFS'
author = 'Dave Hall'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = pkg_resources.require("ncplib")[0].version
# The short X.Y version.
version = '.'.join(str(x) for x in release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', '_include', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
suppress_warnings = ["image.nonlocal_uri"]
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Use RTD theme locally.
if not os.environ.get('READTHEDOCS', None) == 'True':
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'ncplib v1.10.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ncplibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ncplib.tex', 'ncplib Documentation',
'Dave Hall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ncplib', 'ncplib Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ncplib', 'ncplib Documentation',
author, 'ncplib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
ad13fe4d4034323194ccb0aa7d343a294e231820 | 7bb9bd2bdadef1590b2ef7ff309e08abf454e49d | /Tests/pydeijao.py | 1c0590f6fee34dceac4db08a964cc1f5fc772b52 | [] | no_license | ALREstevam/Curso-de-Python-e-Programacao-com-Python | afdf12717a710f20d4513d5df375ba63ba1e1c19 | af6227376736e63810e5979be54eb1c433d669ac | refs/heads/master | 2021-09-07T12:11:17.158298 | 2018-02-22T17:47:19 | 2018-02-22T17:47:19 | 87,453,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | import requests as Rq
from bs4 import BeautifulSoup as Bs
link = 'http://www.pfl.unicamp.br/Rest/view/site/cardapio.php'
page = Rq.get(link)
soup = Bs(page.content, 'html.parser', from_encoding='iso-8859-1')
def formatInput(text):
text = text.replace('\n', '')
text = text.replace('\r', '')
text = text.replace(': ', ':')
text = text.replace(' - ', '-')
text = text.strip()
return text
html = []
html.append(formatInput(soup.find_all('tr')[0].getText()))
for i in range(4, 11):
html.append(formatInput(soup.find_all('tr')[i].getText()))
print('\n\n')
print('{:^50s}'.format(html[0].split('-')[0].upper()))
print()
print('{:^50s}'.format(html[0].split('-')[1]))
print('-'*57)
broke = []
for elem in html[2:11]:
broke.append(elem.split(':'))
broke = [[formatInput(elem) for elem in tup] for tup in broke]
for tup in broke:
print('| {:20s} | {:<30s} |'.format(tup[0], tup[1].lower()))
print('-'*57)
input() | [
"[email protected]"
] | |
ad841ea0fa5e33e9fa357d47590200fdfde6347c | dadd814aceb7ad6698107dea474f92855f79ba51 | /ReplicatedStochasticGradientDescent/rsgd/ReplicatedStochasticGradientDescent.py | 978541c75ccda28c54978f4ee9d18d6fed0ffb8a | [
"MIT"
] | permissive | Nico-Curti/rSGD | 16e41524be2dd8d4988a5ecd368d3ac72d072ffe | b1f72c06a7f68c04fc97aaeae45d75852b541d42 | refs/heads/master | 2020-05-20T15:18:33.520000 | 2019-10-01T14:00:40 | 2019-10-01T14:00:40 | 185,641,977 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,828 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import pickle
import pandas as pd
import numpy as np
import warnings
from scipy.special import erf
from .Patterns import Pattern
from .misc import _check_string
from lib.ReplicatedStochasticGradientDescent.rSGD import _rsgd
from lib.ReplicatedStochasticGradientDescent.rSGD import _predict
import multiprocessing
__package__ = "ReplicatedStochasticGradientDescent"
__author__ = ["Nico Curti ([email protected])", "Daniele Dall'Olio ([email protected])"]
NTH = multiprocessing.cpu_count()
class ReplicatedStochasticGradientDescent():
def __init__(self, K=1, formula='simple', max_iter=1000, seed=135, init_equal=True, waitcenter=False, center=False):
'''
'''
if formula not in ['simple', 'hard', 'continuous', 'corrected']:
raise TypeError('Invalid iteration scheme. Allowed values are ["simple", "hard", "continuous", "corrected"]')
self._K = K
self._formula = formula
self._max_iter = max_iter
self._seed = seed
self._init_equal = init_equal
self._waitcenter = waitcenter
self._center = center
self._weights = None
self._fit = False
def predict(self, X):
'''
Predict the new labels computed by ReplicatedStochasticGradientDescent model
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
'''
if not self._fit:
raise ValueError('ReplicatedStochasticGradientDescent classifier is not fitted yet. Please use the fit method before predict')
    if self._weights is None:
raise ValueError("Weights must be computed before predict function. Use 'fit' function or 'load_weights' to read them from file")
if isinstance(X, Pattern):
testset = X # use this with c++ version
else:
testset = Pattern(X, []) # use this with c++ version
N, K = np.shape(X)
    # missing: check that X's dimensions match the trained weights
predicted_labels = _predict(testset, self._weights.ravel().astype('int64'), N)
return predicted_labels
def fit(self, X, y=None, parameters={'y' : 1, 'eta': (2., 1.), 'lambda' : (.1, 1.), 'gamma' : (float('Inf'), .01) }, nth=NTH):
'''
Fit the ReplicatedStochasticGradientDescent model meta-transformer
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
Returns
-------
self : object
Returns self.
'''
self._fit = False
if isinstance(X, Pattern):
pattern = X
else:
pattern = Pattern(X, y)
self._weights = _rsgd(pattern=pattern.pattern,
K=self._K,
y=parameters['y'],
eta=parameters['eta'],
lamda=parameters['lambda'],
gamma=parameters['gamma'],
formula=self._formula,
seed=self._seed,
max_iter=self._max_iter,
init_equal=self._init_equal,
waitcenter=self._waitcenter,
center=self._center,
nth=nth
)
self._fit = True
return self
def load_weights(self, weightfile, delimiter='\t', binary=False):
'''
Load weights from file
Parameters
----------
weightfile : string
Filename of weights
delimiter : char
Separator for ascii loading
binary : bool
Switch between binary and ascii loading style
Returns
-------
self
'''
if binary:
with open(weightfile, 'rb') as fp:
self._weights = pickle.load(fp)
else:
self._weights = pd.read_csv(weightfile, sep=delimiter, header=None).values.tolist()
self._fit = True
return self
def save_weights(self, weightfile, delimiter='\t', binary=False):
'''
    Save weights to file
Parameters
----------
weightfile : string
Filename to dump the weights
delimiter : char
Separator for ascii dump
binary : bool
Switch between binary and ascii dumping style
'''
if binary:
with open(weightfile, 'wb') as fp:
pickle.dump(self._weights, fp)
else:
pd.DataFrame(self._weights).to_csv(weightfile, sep=delimiter, header=False, index=False)
def __repr__(self):
class_name = self.__class__.__name__
return '<{} Class>'.format(class_name)
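# Minimal usage sketch (illustrative only; assumes the compiled rSGD
# extension is built and a binary-classification dataset X, y is available):
#
#   clf = ReplicatedStochasticGradientDescent(K=3, formula='simple')
#   clf.fit(X, y)
#   labels = clf.predict(X)
#   clf.save_weights('weights.csv')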
| [
"[email protected]"
] | |
5e9517f7cd2ef1665d41f8b905ff72df96c9955c | 6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9 | /Algorithm/Swea/D3_10761.py | 32b83f64fe69ec3ebfcdef0f23b68b5069769425 | [] | no_license | hongyong3/TIL | 36d031c0da9e3e6db3eebb977bd3e12df00a849f | 7f1492128e957a78fc95b255f4f7f2978161e471 | refs/heads/master | 2023-08-19T09:16:03.231757 | 2023-08-18T09:38:47 | 2023-08-18T09:38:47 | 162,100,258 | 1 | 0 | null | 2023-02-11T00:52:32 | 2018-12-17T08:42:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,755 | py | import sys
sys.stdin = open("D3_10761_input.txt", "r")
def solve1(a, b, ad, bd):  # unused, incomplete helper left over from an earlier attempt
if a[0][2] > ad:
ad += 1
T = int(input())
for test_case in range(T):
data = input().split()[1:]
    O, B, k = [], [], 1 # [k, name, distance]; k = arrival order, distance to destination
ODist, BDist = 1, 1
ans = 0
while data:
if data[0] == 'B':
B.append([k, data.pop(0), int(data.pop(0))])
else:
O.append([k, data.pop(0), int(data.pop(0))])
k += 1
while O or B:
if O and B:
if O[0][0] > B[0][0]:
if B[0][2] > BDist:
BDist += 1
elif B[0][2] == BDist:
B.pop(0)
else:
BDist -= 1
if O[0][2] > ODist:
ODist += 1
elif O[0][2] == ODist:
pass
else:
ODist -= 1
else:
if O[0][2] > ODist:
ODist += 1
elif O[0][2] == ODist:
O.pop(0)
else:
ODist -= 1
if B[0][2] > BDist:
BDist += 1
elif B[0][2] == BDist:
pass
else:
BDist -= 1
elif O and not B:
if O[0][2] > ODist:
ODist += 1
elif O[0][2] == ODist:
O.pop(0)
else:
ODist -= 1
else:
if B[0][2] > BDist:
BDist += 1
elif B[0][2] == BDist:
B.pop(0)
else:
BDist -= 1
ans += 1
print("#{} {}".format(test_case + 1, ans)) | [
"[email protected]"
] | |
027615d70d9fdcdf0d7bf877bb460b3469d5d748 | 046207f434966462fff55f634ba5a450d2208534 | /APSS/hanoi.py | 21b4fc20625f00db38af0fb461694eb31d1690ae | [] | no_license | sungsikyang92/pythonStudy | e293e1ac8af443809f840ccee7052a8f57480b70 | 26522b5e232ccd9ab25c52122d254aa7249a8fdf | refs/heads/master | 2023-07-04T16:58:40.318976 | 2021-08-04T02:00:27 | 2021-08-04T02:00:27 | 365,398,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | def hanoi_tower(n, start, end):
if n == 1:
print(start, end)
return
    hanoi_tower(n - 1, start, 6 - start - end) # step 1
    print(start, end) # step 2
    hanoi_tower(n - 1, 6 - start - end, end) # step 3
n = int(input())
print(2 ** n - 1)
hanoi_tower(n, 1, 3)
# Tower of Hanoi  # Input: n, the number of disks to move
#                          from_pos, the peg the disks currently sit on
#                          to_pos, the destination peg
#                          aux_pos, the auxiliary peg used while moving
# Output: the sequence of moves
# def hanoi(n, from_pos, to_pos, aux_pos):
#     if n == 1: # a single disk can just be moved directly
#         print(from_pos, "->", to_pos)
#         return
#     # move n - 1 disks to aux_pos (using to_pos as the auxiliary peg)
#     hanoi(n - 1, from_pos, aux_pos, to_pos)
#     # move the largest disk to the destination
#     print(from_pos, "->", to_pos)
#     # move the n - 1 disks on aux_pos to the destination (using from_pos as the auxiliary peg)
#     hanoi(n - 1, aux_pos, to_pos, from_pos)
#
# print("n = 1")
# hanoi(1, 1, 3, 2) # move one disk from peg 1 to peg 3 (peg 2 as auxiliary)
# print("n = 2")
# hanoi(2, 1, 3, 2) # move two disks from peg 1 to peg 3 (peg 2 as auxiliary)
# print("n = 3")
# hanoi(3, 1, 3, 2) # move three disks from peg 1 to peg 3 (peg 2 as auxiliary)
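# Quick sanity check of the move count: solving n disks always takes
# 2**n - 1 moves (each recursion level does the n-1 work twice plus one move),
# matching the count printed before hanoi_tower runs.
def count_moves(n):
    return 1 if n == 1 else 2 * count_moves(n - 1) + 1
assert count_moves(10) == 2 ** 10 - 1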
| [
"[email protected]"
] | |
a0fc06bf78776729139fe2a1d000a1a3dc8067cd | 6c49c40d35d485c6fa7f9b92358b0888751b1dbe | /data/QSO_CIV_catalogs/matchingHam17toKoz17.py | d1e772e4082e9901d45e7d1b9c9c7bb813dfb224 | [] | no_license | d80b2t/CIV_CLQs | 7f8bfa954f29e9516ddb9ce80c59fffe9ce8235b | 95ea176c8f5ab6ee0c19d42aab964ebfd8bd0ce8 | refs/heads/master | 2020-08-31T20:56:17.168339 | 2020-07-30T13:35:19 | 2020-07-30T13:35:19 | 218,782,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py |
import numpy as np
import pandas as pd
from pandas import DataFrame
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
## Hamann et al. (2017) ERQ BOSS DR12 catalog
path = '/cos_pc19a_npr/data/ERQs/Hamann2017_CIVcatalog/'
filename = 'C4N5REWs_DR12v11_MNRAS.fits'
infile = path+filename
data_full = fits.open(infile)
Ham17_full = data_full[1].data
#with fits.open(infile) as data:
# df_Ham17 = pd.DataFrame(data[0].data)
## knocking out a couple of objects with bad REW values
#Ham17 = Ham17_full[np.where( (Ham17['rew'] >0.) & (Ham17['rew'] < 10000.) )]
Ham17 = Ham17_full
## Kozłowski_2017_ApJS_228_9. BOSS DR12 "Value Added" catalog
path = '/cos_pc19a_npr/programs/quasars/CIV_CLQs/data/QSO_CIV_catalogs/'
filename = 'SDSS-DR12Q-BH_extra.fits'
infile = path+filename
data_full = fits.open(infile)
Koz17_full = data_full[1].data
Koz17 = Koz17_full
## astropy Table to pandas DataFrame #2804
## https://github.com/astropy/astropy/issues/2804
Ham17_table = Table(Ham17)
Ham17_df = DataFrame(np.array(Ham17_table))
print('len(Ham17_df)', len(Ham17_df))
Koz17_table = Table(Koz17)
## Have to remove these multi-valued (5-element) columns in order for the
## DataFrame switcharoo to work..
Koz17_table.remove_column('PSFFLUX')
Koz17_table.remove_column('IVAR_PSFFLUX')
Koz17_table.remove_column('PSFMAG')
Koz17_table.remove_column('ERR_PSFMAG')
## Some nice DataFrame polish/manipulation
Koz17_df = DataFrame(np.array(Koz17_table))
print('len(Koz17_df)', len(Koz17_df))
## Column names are case sensitive!
Koz17_df.rename(columns={'SDSS_NAME':'sdss_name'}, inplace=True)
## Testing on a wee bit of the DataFrame...
mini_merge = pd.merge(Ham17_df[0:100], Koz17_df[0:100], on="sdss_name")
The_DF = pd.merge(Ham17_df, Koz17_df, on="sdss_name")
## Just wanting to write a few things out to a simple text file.
file1 = open("temp.txt","w+")
#for ii in range(len(Koz17)): # if the full catalog is wanted!
for ii in range(100):
print(ii)
name = Ham17[np.where((Koz17['SDSS_NAME'][ii] == Ham17['SDSS_NAME']))]['SDSS_NAME']
REW = Ham17[np.where((Koz17['SDSS_NAME'][ii] == Ham17['SDSS_NAME']))]['rew']
bal_flag_vi = Ham17[np.where((Koz17['SDSS_NAME'][ii] == Ham17['SDSS_NAME']))]['bal_flag_vi']
f1450 = Ham17[np.where((Koz17['SDSS_NAME'][ii] == Ham17['SDSS_NAME']))]['f1450']
if (len(name) > 0 and bal_flag_vi <1) :
#print(ii, Koz17['SDSS_NAME'][ii], name, REW, f1450)
        file1.write(str(ii) + ' ' + str(Koz17['SDSS_NAME'][ii]) + ' ' + str(name) + ' ' + str(REW))
file1.write(" {} {} {} {} {} {} {} {} {} \n".format(ii, name, Koz17['RA'][ii], Koz17['DEC'][ii],
#bal_flag_vi,
REW, f1450,
Koz17['L1350'][ii], Koz17['LBol'][ii], #Koz17['eLBol'][ii],
Koz17['nEdd'][ii]))
file1.close()
| [
"[email protected]"
] | |
d372b40b45954581e27277427a4693ec3ac1125d | d92ce9a32bf20086e30701585a4e73c1f2469aff | /Programs/dsaenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py | 0a8b952fb5bfc53a50be9c2858538e2322e08730 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | Prem-chouhan/fellowshipProgram_PremsinghChouhan | f61cf4407458f14ef7eb6d80effb25f9592d2552 | 33e6b57f6c75a80d8a3d1f868d379e85365a1336 | refs/heads/master | 2020-09-14T12:45:16.269268 | 2019-12-23T14:24:10 | 2019-12-23T14:24:10 | 223,128,906 | 0 | 1 | null | 2020-07-22T11:50:46 | 2019-11-21T08:47:28 | Python | UTF-8 | Python | false | false | 519 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from .__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
| [
"[email protected]"
] | |
094cca08d2e6669c3f9919555d9985f0bd653d5a | 1ed536ef1527e6655217e731f622d643ece49c2b | /scripts/links2fasta.py | 1c68c928d99a83eb5b7a1d4e7307d3cfc697cb29 | [] | no_license | siping/cgat | de0f7af124eb38c72d7dece78fff83ff92ddbf96 | aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e | refs/heads/master | 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,378 | py | '''
links2fasta.py - convert links into alignments
==============================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python links2fasta.py --help
Type::
python links2fasta.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import tempfile
import time
import optparse
import CGAT.Experiment as E
import CGAT.BlastAlignments as BlastAlignments
import alignlib
import CGAT.Genomics as Genomics
import CGAT.Exons as Exons
import CGAT.FastaIterator as FastaIterator
class Map:
def __init__(self):
pass
def read( self, line ):
try:
( self.mToken,
self.mOldFrom, self.mOldTo, self.mOldAli,
self.mNewFrom, self.mNewTo, self.mNewAli,
self.mOldLength, self.mNewLength) = line[:-1].split("\t")
except ValueError:
raise ValueError("parsing error in line\n%s" % line)
(self.mOldFrom, self.mOldTo, self.mNewFrom, self.mNewTo) = \
map(int, (self.mOldFrom, self.mOldTo, self.mNewFrom, self.mNewTo))
self.mMapOld2New = None
def expand( self ):
if not self.mMapOld2New:
self.mMapOld2New = alignlib.makeAlignmentVector()
alignlib.AlignmentFormatEmissions(
self.mOldFrom, self.mOldAli,
self.mNewFrom, self.mNewAli).copy( self.mMapOld2New )
def clear( self ):
if self.mMapOld2New:
self.mMapOld2New.clear()
self.mMapOld2New = None
def __str__(self):
return string.join(map(str, (self.mToken,
self.mOldFrom, self.mOldTo, self.mOldAli,
self.mNewFrom, self.mNewTo, self.mNewAli,
self.mOldLength, self.mNewLength)), "\t")
def ScaleAlignment( alignment, factor ):
"""scale alignment string."""
data = re.split("[+-]", alignment[1:])
data = map( lambda x: int(x) * factor, data )
signs = [ "+", "-" ] * (1 + len(data) / 2)
if alignment[0] == "+":
del signs[-1]
else:
del signs[0]
s = map( lambda x,y: "%s%i" % (x,y), signs, data)
return string.join(s, "")
##-------------------------------------------------------------------------
def Write( map_row2col, row_seq, col_seq, link,
no_gaps = False, no_identical = False,
min_length = 0,
suffix1="", suffix2="",
outfile = None,
pair_filter = None,
format = "fasta" ):
"""write alignment based on map_row2col."""
status = None
filter_status = "new"
if map_row2col.getLength() == 0:
status = "empty"
if not status:
f = alignlib.AlignmentFormatExplicit( map_row2col, row_seq, col_seq )
row_from = map_row2col.getRowFrom()
row_to = map_row2col.getRowTo()
col_from = map_row2col.getColFrom()
col_to = map_row2col.getColTo()
row_ali, col_ali = f.mRowAlignment, f.mColAlignment
if not status:
if no_gaps:
# remove gaps from fasta
r = []
c = []
for x in range(len(row_ali)):
if row_ali[x] != "-" and col_ali[x] != "-":
r.append( row_ali[x] )
c.append( col_ali[x] )
row_ali = string.join(r, "")
col_ali = string.join(c, "")
if not status and len(row_ali) < min_length:
status = "length"
if not status and no_identical:
if row_ali == col_ali:
status = "identical"
if not status:
if pair_filter:
id = "%s-%s" % (link.mQueryToken, link.mSbjctToken)
if id in pair_filter:
h = Genomics.GetHID( row_ali + ";" + col_ali )
if h in pair_filter[id]:
filter_status = "old"
translation1 = Genomics.TranslateDNA2Protein( row_ali )
translation2 = Genomics.TranslateDNA2Protein( col_ali )
if "X" in translation1 or "x" in translation2:
status = "stops"
else:
status = "success"
if filter_status == "new":
if format == "fasta":
print ">%s%s %s %s\n%s\n>%s%s %s %s\n%s" % (link.mQueryToken, suffix1, row_from, row_to, row_ali,
link.mSbjctToken, suffix2, col_from, col_to, col_ali )
elif format == "dummy":
pass
else:
raise ValueError("unknown format")
if outfile:
outfile.write( "%s%s\t%s%s\t%s\t%i\t%s\n" % (link.mQueryToken, suffix1, link.mSbjctToken, suffix2,
status, map_row2col.getLength(), filter_status ) )
return status
def GetAdjustedBoundaries( id, exons ):
"""return codon adjusted exon boundaries."""
f, t = exons[id].mPeptideFrom, exons[id].mPeptideTo
f += exons[id].frame
if id < len(exons) -1:
next_frame = exons[id+1].frame
else:
next_frame = 0
if next_frame:
t -= 3 - next_frame
return f, t
if __name__ == '__main__':
parser = E.OptionParser( version = "%prog version: $Id: links2fasta.py 2446 2009-01-27 16:32:35Z andreas $", usage = globals()["__doc__"] )
parser.add_option( "-s", "--sequences", dest="filename_sequences", type="string",
help="peptide sequence [Default=%default]" )
parser.add_option( "-f", "--format", dest="format", type="string",
help="output format [Default=%default]" )
parser.add_option( "-e", "--expand", dest="expand", action="store_true",
help="expand positions from peptide to nucleotide alignment [Default=%default]")
parser.add_option( "-m", "--map", dest="filename_map", type="string",
help="map alignments [Default=%default]")
parser.add_option( "-c", "--codons", dest="require_codons", action="store_true",
help="require codons [Default=%default]")
parser.add_option( "--one-based-coordinates", dest="one_based_coordinates", action="store_true",
help="expect one-based coordinates. The default are zero based coordinates [Default=%default].")
parser.add_option( "--no-identical", dest="no_identical", action="store_true",
help="do not output identical pairs [Default=%default]" )
parser.add_option( "-g", "--no-gaps", dest="no_gaps", action="store_true",
help="remove all gaps from aligned sequences [Default=%default]")
parser.add_option( "-x", "--exons", dest="filename_exons", type="string",
help="filename with exon boundaries [Default=%default]")
parser.add_option( "-o", "--outfile", dest="filename_outfile", type="string",
help="filename to save links [Default=%default]")
parser.add_option( "--min-length", dest="min_length", type="int",
help="minimum length of alignment [Default=%default]")
parser.add_option( "--filter", dest="filename_filter", type="string",
help="given a set of previous alignments, only write new pairs [Default=%default].")
parser.set_defaults(
filename_sequences = None,
filename_exons = None,
filename_map = None,
filename_outfile = None,
no_gaps = False,
format = "fasta",
expand = False,
require_codons = False,
no_identical = False,
min_length = 0,
report_step = 100,
one_based_coordinates = False,
filename_filter = None)
(options, args) = E.Start( parser, add_mysql_options = True )
t0 = time.time()
if options.filename_sequences:
sequences = Genomics.ReadPeptideSequences( open(options.filename_sequences, "r") )
else:
sequences = {}
if options.loglevel >= 1:
options.stdlog.write( "# read %i sequences\n" % len(sequences) )
sys.stdout.flush()
if options.filename_exons:
exons = Exons.ReadExonBoundaries( open(options.filename_exons, "r") )
else:
exons = {}
if options.loglevel >= 1:
options.stdlog.write( "# read %i exons\n" % len(exons) )
sys.stdout.flush()
if options.filename_map:
map_old2new = {}
for line in open(options.filename_map, "r"):
if line[0] == "#": continue
m = Map()
m.read( line )
map_old2new[m.mToken] = m
else:
map_old2new = {}
if options.loglevel >= 1:
options.stdlog.write( "# read %i maps\n" % len(map_old2new) )
sys.stdout.flush()
if options.filename_filter:
if options.loglevel >= 1:
options.stdlog.write( "# reading filtering information.\n" )
sys.stdout.flush()
map_pair2hids = {}
if os.path.exists( options.filename_filter ):
infile = open(options.filename_filter, "r")
iterator = FastaIterator.FastaIterator( infile )
while 1:
cur_record = iterator.next()
if cur_record is None: break
record1 = cur_record
cur_record = iterator.next()
if cur_record is None: break
record2 = cur_record
identifier1 = re.match("(\S+)", record1.title).groups()[0]
identifier2 = re.match("(\S+)", record2.title).groups()[0]
id = "%s-%s" % (identifier1, identifier2)
s = Genomics.GetHID(record1.sequence + ";" + record2.sequence)
if id not in map_pair2hids: map_pair2hids[id] = []
map_pair2hids[id].append( s )
infile.close()
if options.loglevel >= 1:
options.stdlog.write( "# read filtering information for %i pairs.\n" % len(map_pair2hids) )
sys.stdout.flush()
else:
map_pair2hids = None
if options.loglevel >= 1:
options.stdlog.write( "# finished input in %i seconds.\n" % (time.time() - t0))
if options.filename_outfile:
outfile = open(options.filename_outfile, "w")
else:
outfile = None
map_row2col = alignlib.makeAlignmentVector()
tmp1_map_row2col = alignlib.makeAlignmentVector()
counts = {}
iterations = 0
t1 = time.time()
ninput, nskipped, noutput = 0, 0, 0
for link in BlastAlignments.iterator_links( sys.stdin ):
iterations += 1
ninput += 1
if options.loglevel >= 1:
if (iterations % options.report_step == 0):
options.stdlog.write( "# iterations: %i in %i seconds.\n" % (iterations, time.time() - t1) )
sys.stdout.flush()
if link.mQueryToken not in sequences or \
link.mSbjctToken not in sequences:
nskipped += 1
continue
if options.loglevel >= 3:
options.stdlog.write( "# read link %s\n" % str(link) )
row_seq = alignlib.makeSequence( sequences[link.mQueryToken] )
col_seq = alignlib.makeSequence( sequences[link.mSbjctToken] )
if options.one_based_coordinates:
link.mQueryFrom -= 1
link.mSbjctFrom -= 1
if options.expand:
link.mQueryFrom = link.mQueryFrom * 3
link.mSbjctFrom = link.mSbjctFrom * 3
link.mQueryAli = ScaleAlignment( link.mQueryAli, 3 )
link.mSbjctAli = ScaleAlignment( link.mSbjctAli, 3 )
map_row2col.clear()
alignlib.AlignmentFormatEmissions(
link.mQueryFrom, link.mQueryAli,
link.mSbjctFrom, link.mSbjctAli ).copy( map_row2col )
if link.mQueryToken in map_old2new:
tmp1_map_row2col.clear()
map_old2new[link.mQueryToken].expand()
if options.loglevel >= 3:
options.stdlog.write( "# combining in row with %s\n" %\
str(alignlib.AlignmentFormatEmissions(map_old2new[link.mQueryToken].mMapOld2New ) ))
alignlib.combineAlignment( tmp1_map_row2col,
map_old2new[link.mQueryToken].mMapOld2New,
map_row2col,
alignlib.RR )
map_old2new[link.mQueryToken].clear()
alignlib.copyAlignment( map_row2col, tmp1_map_row2col )
if link.mSbjctToken in map_old2new:
tmp1_map_row2col.clear()
map_old2new[link.mSbjctToken].expand()
if options.loglevel >= 3:
options.stdlog.write( "# combining in col with %s\n" %\
str(alignlib.AlignmentFormatEmissions(map_old2new[link.mSbjctToken].mMapOld2New ) ))
alignlib.combineAlignment( tmp1_map_row2col,
map_row2col,
map_old2new[link.mSbjctToken].mMapOld2New,
alignlib.CR )
map_old2new[link.mSbjctToken].clear()
alignlib.copyAlignment( map_row2col, tmp1_map_row2col )
dr = row_seq.getLength() - map_row2col.getRowTo()
dc = col_seq.getLength() - map_row2col.getColTo()
if dr < 0 or dc < 0:
raise ValueError("out of bounds alignment: %s-%s: alignment out of bounds. row=%i col=%i ali=%s" %\
(link.mQueryToken,
link.mSbjctToken,
row_seq.getLength(),
col_seq.getLength(),
str(alignlib.AlignmentFormatEmissions(map_row2col))))
if options.loglevel >= 2:
options.stdlog.write( str( alignlib.AlignmentFormatExplicit( map_row2col,
row_seq,
col_seq )) + "\n" )
## check for incomplete codons
if options.require_codons:
naligned = map_row2col.getNumAligned()
# turned off, while fixing alignlib
if naligned % 3 != 0:
options.stdlog.write( "# %s\n" % str(map_row2col) )
options.stdlog.write( "# %s\n" % str(link) )
options.stdlog.write( "# %s\n" % str(map_old2new[link.mQueryToken]) )
options.stdlog.write( "# %s\n" % str(map_old2new[link.mSbjctToken]) )
options.stdlog.write( "#\n%s\n" % alignlib.AlignmentFormatExplicit( map_row2col,
row_seq,
col_seq ) )
raise ValueError("incomplete codons %i in pair %s - %s" % (naligned, link.mQueryToken, link.mSbjctToken))
## if so desired, write on a per exon level:
if exons:
if link.mQueryToken not in exons:
raise IndexError("%s not found in exons" % (link.mQueryToken))
if link.mSbjctToken not in exons:
raise IndexError("%s not found in exons" % (link.mSbjctToken))
exons1 = exons[link.mQueryToken]
exons2 = exons[link.mSbjctToken]
## Get overlapping segments
segments = Exons.MatchExons( map_row2col, exons1, exons2 )
for a,b in segments:
tmp1_map_row2col.clear()
# make sure you got codon boundaries. Note that frameshifts
# in previous exons will cause the codons to start at positions
# different from mod 3. The problem is that I don't know where
# the frameshifts occur exactly. The exon boundaries are given
# with respect to the cds, which include the frame shifts.
# Unfortunately, phase information seems to be incomplete in the input files.
from1, to1 = GetAdjustedBoundaries( a, exons1 )
from2, to2 = GetAdjustedBoundaries( b, exons2 )
alignlib.copyAlignment( tmp1_map_row2col, map_row2col,
from1+1, to1, from2+1, to2 )
mode = Write( tmp1_map_row2col, row_seq, col_seq, link,
no_gaps = options.no_gaps,
no_identical = options.no_identical,
min_length = options.min_length,
suffix1="_%s" % str(a),
suffix2="_%s" % str(b),
outfile = outfile,
                              pair_filter = map_pair2hids,
format = options.format )
if mode not in counts: counts[mode] = 0
counts[mode] += 1
else:
mode = Write( map_row2col, row_seq, col_seq, link,
min_length = options.min_length,
no_gaps = options.no_gaps,
no_identical = options.no_identical,
outfile = outfile,
pair_filter = map_pair2hids,
format = options.format )
if mode not in counts: counts[mode] = 0
counts[mode] += 1
noutput += 1
if outfile: outfile.close()
if options.loglevel >= 1:
options.stdlog.write("# %s\n" % ", ".join( map( lambda x,y: "%s=%i" % (x,y), counts.keys(), counts.values() ) ))
options.stdlog.write("# ninput=%i, noutput=%i, nskipped=%i\n" % (ninput, noutput, nskipped) )
E.Stop()
| [
"[email protected]"
] | |
8dcb09f3f8575bdcaa50a1cc60c11410132e1057 | 8b2e95525139765c5344cc7992203def31b4a300 | /Python-PSdrone/DRONEtowple.py | 1e7fde360af416ea7e17fcad4989e444ac0f22bb | [] | no_license | tttienthinh/Drone | e84ff2b7dea32346e0717a9597a980e15a43543b | 73e98a0e727128a6e290872befe3a42df3d45c7c | refs/heads/master | 2022-04-21T20:16:26.169552 | 2020-04-10T19:31:20 | 2020-04-10T19:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from ps_drone import *
import time
from threading import Thread
class DroneTowple(Thread):
def __init__(self):
self.drone = Drone() # Start using drone
self.drone.startup() # Connects to drone and starts subprocesses
self.drone.reset() # Always good, at start
while self.drone.getBattery()[0] == -1: time.sleep(0.1) # Waits until the drone has done its reset
        ''' Names of the navdata packages to request '''
self.packages = ['altitude', 'demo', 'pressure_raw', 'wind_speed', 'pwm']
self.drone.setConfig("control vz max", "0.04")
self.drone.useDemoMode(False)
self.drone.getNDpackage(self.packages)
Thread.__init__(self)
self.Quit = False
self.start()
def run(self):
        input('Press Enter to quit')
self.Quit = True
self.drone.land()
def NavData(self):
navData = self.drone.NavData
return navData
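# Minimal usage sketch (illustrative; assumes a connected AR.Drone and the
# ps_drone library):
#
#   monitor = DroneTowple()
#   while not monitor.Quit:
#       print(monitor.NavData())
#       time.sleep(0.5)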
| [
"[email protected]"
] | |
f41bc5dcd8f481d090853be8b87143d2ea9ff3f8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /JBkfqYW4iYwmgvwTf_5.py | 582e98c416631fbc83dbdb5cd9b9d88f03ac5c67 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | """
Create a function that returns `True` if a number is prime, and `False`
otherwise. A prime number is any positive integer that is evenly divisible by
only two divisors: 1 and itself.
The first ten prime numbers are:
2, 3, 5, 7, 11, 13, 17, 19, 23, 29
### Examples
is_prime(31) ➞ True
is_prime(18) ➞ False
is_prime(11) ➞ True
### Notes
* A prime number has no other factors except 1 and itself.
* If a number is odd it is not divisible by an even number.
* 1 is not considered a prime number.
"""
def is_prime(num):
    if num < 2:  # 1 is not prime; neither are 0 or negatives
        return False
    for i in range(2, num):
        if num % i == 0:
            return False
    return True
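# A faster variant (sketch): trial division only needs to test divisors up to
# sqrt(num), because any factor pair (a, b) with a * b == num has
# min(a, b) <= sqrt(num).
def is_prime_fast(num):
    if num < 2:
        return False
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
assert [n for n in range(30) if is_prime_fast(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]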
| [
"[email protected]"
] | |
d0e588e90b6e9d3db72d1392a01a7e6b09d3bdb7 | 1b5404b8099de74d4e39e0a41b1d04c61defa8d4 | /five-words-five-letters/stuff/idea1.py | ed03774f208d9a8f86f2c874a5aa0592ccdb6ad7 | [] | no_license | ipeterov/random-stuff | 5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd | dbb38d42331f636919fd149b23783e02ee2c9afb | refs/heads/master | 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import json
N_WORDS = 4
with open("data.json") as f:
data = json.load(f)
words = data["words"]
found_words = []
letters = set()
while words and len(found_words) < N_WORDS:  # guard: stop once the candidate list is exhausted
for word in words:
if set(word).intersection(letters):
continue
if not found_words:
print(f"Trying {word}")
found_words.append(word)
letters.update(word)
break
    else:  # no remaining word is letter-disjoint: reset and drop the first candidate
found_words = []
letters = set()
words = words[1:]
print(found_words)
| [
"[email protected]"
] | |
744685a3a5a9654399524c7be7eeb912353c7b3b | e3abf21d5e3aac6de49395db8dae56565198a701 | /workon/contrib/flow/redis/consumer.py | 758084a6d86ba4b7505738396020753ea64447ba | [
"BSD-3-Clause"
] | permissive | workon-io/django-workon_old | 0bcb63025eda5d6815b082d23e95ab22385d6233 | be935a07a855b2150b4b81ee87d5041761ff168e | refs/heads/master | 2021-08-19T07:24:19.801106 | 2017-11-25T06:13:00 | 2017-11-25T06:13:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | import asyncio
from aioredis import create_connection, Channel
import websockets
async def publish_to_redis(msg, path):
# Connect to Redis
conn = await create_connection(('localhost', 6379))
# Publish to channel "lightlevel{path}"
await conn.execute('publish', 'lightlevel{}'.format(path), msg)
async def server(websocket, path):
try:
while True:
# Receive data from "the outside world"
message = await websocket.recv()
# Feed this data to the PUBLISH co-routine
await publish_to_redis(message, path)
await asyncio.sleep(1)
except websockets.exceptions.ConnectionClosed:
print('Connection Closed!')
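async def consume_from_redis(path):
    # Hypothetical subscriber counterpart (a sketch assuming the aioredis 1.x
    # low-level pub/sub API; the channel name mirrors publish_to_redis above).
    conn = await create_connection(('localhost', 6379))
    await conn.execute_pubsub('subscribe', 'lightlevel{}'.format(path))
    channel = conn.pubsub_channels['lightlevel{}'.format(path)]
    while await channel.wait_message():
        message = await channel.get(encoding='utf-8')
        print('Received: {}'.format(message))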
if __name__ == '__main__':
# Boiler-plate for the websocket server, running on localhost, port 8765
loop = asyncio.get_event_loop()
loop.set_debug(True)
ws_server = websockets.serve(server, 'localhost', 8765)
loop.run_until_complete(ws_server)
loop.run_forever() | [
"[email protected]"
] | |
f075789d1da3f0e16a12c9a8acfcde09b6e99bf2 | 8b57c6609e4bf3e6f5e730b7a4a996ad6b7023f0 | /models/view_escaping/search.py | e2ff59769f0e7b547699cce159f1c3593f689047 | [] | no_license | bullll/splunk | 862d9595ad28adf0e12afa92a18e2c96308b19fe | 7cf8a158bc8e1cecef374dad9165d44ccb00c6e0 | refs/heads/master | 2022-04-20T11:48:50.573979 | 2020-04-23T18:12:58 | 2020-04-23T18:12:58 | 258,293,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,501 | py | from __future__ import absolute_import
from builtins import object
from splunk.models.view_escaping.base import STRING_SEARCH_MODE, DEFAULT_SEARCH_ID,POST_SEARCH_MODE
from splunk.models.view_escaping.base import SAVED_SEARCH_MODE, TEMPLATE_SEARCH_MODE
from splunk.models.view_escaping.drilldown import parseEventHandler
from splunk.models.view_escaping.tokendeps import parseTokenDeps
class Search(object):
def __init__(self, searchMode=STRING_SEARCH_MODE, searchCommand="", earliestTime=None, latestTime=None, id=None, base=None, app=None, cache=None, sampleRatio=None, tokenDeps=None, refresh=None, refreshType=None):
self.searchMode = searchMode
self.searchCommand = searchCommand
self.earliestTime = earliestTime
self.latestTime = latestTime
self.id = id
self.baseSearchId = base
self.app = app
self.statusBuckets = 0
self.sampleRatio = sampleRatio
self.refresh = refresh
self.refreshType = refreshType
        if self.searchMode == POST_SEARCH_MODE and self.baseSearchId is None:
self.baseSearchId = DEFAULT_SEARCH_ID
self.eventHandlers = []
self.cache = cache
self.tokenDeps = tokenDeps
def normalizedSearchCommand(self):
return self.searchCommand.strip()
def createSearchFromSearchXml(searchNode):
"""
Parses a search from search, dashboard, panel element xml nodes
@param searchNode: Lxml representing a form or dashboard element
@param id: and optional id to force id to
@return:
"""
opt = dict()
base = searchNode.attrib.get('base')
if searchNode.find('query') is not None:
opt['searchMode'] = TEMPLATE_SEARCH_MODE
opt['searchCommand'] = (
searchNode.findtext('query')).replace("\n", " ").replace("\t", " ")
sampleRatio = searchNode.findtext('sampleRatio')
if sampleRatio is not None:
opt['sampleRatio'] = int(sampleRatio)
elif searchNode.get('ref') is not None:
opt['searchMode'] = SAVED_SEARCH_MODE
opt['searchCommand'] = (
searchNode.get('ref')).replace("\n", " ").replace("\t", " ")
if searchNode.get('app') is not None:
opt['app'] = searchNode.get('app')
cacheVal = searchNode.findtext('cache')
if cacheVal:
opt['cache'] = cacheVal
elif not base:
return False
for nodePair in [('earliest', 'earliestTime'), ('latest', 'latestTime')]:
nodeVal = searchNode.findtext(nodePair[0])
if nodeVal:
opt[nodePair[1]] = nodeVal
refresh = searchNode.findtext('refresh')
if refresh is not None:
opt['refresh'] = refresh
refreshType = searchNode.findtext('refreshType')
if refreshType is not None:
opt['refreshType'] = refreshType
id = searchNode.attrib.get('id')
tokenDeps = parseTokenDeps(searchNode)
if id:
opt['id'] = id
if base:
opt['base'] = base
opt['searchMode'] = POST_SEARCH_MODE
if tokenDeps:
opt['tokenDeps'] = tokenDeps
search = Search(**opt)
for evtName in ('progress', 'preview', 'done', 'finalized', 'error', 'fail', 'cancelled'):
createEventHandlerFromXml(search, searchNode, evtName)
return search
def createEventHandlerFromXml(search, searchNode, eventName):
node = searchNode.find(eventName)
if node is not None:
search.eventHandlers.append((eventName, parseEventHandler(node, ('any', 'match'))))
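# Illustrative shape of the XML this module parses (element and attribute
# names are taken from the parsing code above; the query text is made up):
#
#   <search id="base1">
#     <query>index=_internal | stats count</query>
#     <earliest>-24h</earliest>
#     <latest>now</latest>
#     <refresh>30s</refresh>
#   </search>
#
# e.g. search = createSearchFromSearchXml(lxml.etree.fromstring(xml_string))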
| [
"[email protected]"
] | |
9ce428f475168d006ce5e4484cc7ed838008ff93 | 3545ee160458acac7452666aa07826b58e144351 | /demo/text_recognition/__base__/test_scripts/test_crnn.py | e8f2480000631e7e11685879696526362fa2f576 | [
"Apache-2.0"
] | permissive | OCRWorld/DAVAR-Lab-OCR | 7cc81af43a0e8f60066e7761d950f509c40cfd46 | fb47a96d1a38f5ce634c6f12d710ed5300cc89fc | refs/heads/main | 2023-08-29T09:41:19.377628 | 2021-11-08T11:16:37 | 2021-11-08T11:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : test_crnn.py
# Abstract : CRNN Model evaluation config
# Current Version: 1.0.0
# Date : 2021-06-11
##################################################################################################
"""
import os
_base_ = [
'../test_base_setting.py'
]
ckpts = list()
# model name setting
out_name = 'davar_test_crnn'
# model parameter dictionary
tmp_dict = dict()
# experiment Name
tmp_dict['Name'] = 'davar_test_crnn'
# ===================== model .pth file path ========================
tmp_dict['ModelPath'] = '/data1/workdir/davar_opensource/att_test/CRNN_pretrained-84c6eb23.pth'
out_name += '/' + tmp_dict['ModelPath'].split('/')[-2].split('.')[0]
# ===================== model config file path ========================
tmp_dict['ConfigPath'] = '/data1/open-source/demo/text_recognition/__base__/res32_bilstm_ctc.py'
# ===================== model test mode ========================
tmp_dict['Epochs'] = None
ckpts.append(tmp_dict)
# save result of the test experiment
out_path = os.path.join('/data1/output_dir/sota_exp', out_name + '/')
force_test = False
force_eval = False
do_test = 1 # 1 for test
do_eval = 1
test_path = out_path + 'res/'
eval_path = out_path + 'eval/'
| [
"[email protected]"
] | |
dc3d16d9e2f41a0ba7b4b953991209736a19d45f | 0bc57447bedd04510a94a35cdee5b8ffdf3e5245 | /musics/urls.py | 4f2331c8f8e2ee46fc5241e057da0295090d012d | [] | no_license | GH-Lim/DRF | 243eead15f28bfbae056d01e0e0821058c39ca38 | b839df7b8eb1adbc72f31b530700389536551308 | refs/heads/master | 2020-08-26T12:14:15.010107 | 2019-10-24T04:37:43 | 2019-10-24T04:37:43 | 217,007,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from django.urls import path
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from . import views
schema_view = get_schema_view(
openapi.Info(
title='Music API',
default_version='v1',
        description='Music-related API service.',
)
)
app_name = 'musics'
urlpatterns = [
path('musics/', views.music_list, name='music_list'),
path('musics/<int:music_pk>/', views.music_detail, name='music_detail'),
path('musics/<int:music_pk>/comments/', views.comments_create, name='comments_create'),
path('artists/', views.artist_list_apply, name='artist_list_apply'),
path('artists/<int:artist_pk>/', views.artist_detail, name='artist_detail'),
path('artists/<int:artist_pk>/musics/', views.musics_create, name='musics_create'),
path('comments/', views.comment_list, name='comment_list'),
path('comments/<int:comment_pk>/', views.comments_update_and_delete, name='comments_update_and_delete'),
    path('artists/<int:comment_pk>/', views.comment_detail, name='comment_detail'),  # NOTE: shadowed by the artist_detail route above, so it can never match
path('docs/', schema_view.with_ui('redoc'), name='api_docs'),
path('swagger/', schema_view.with_ui('swagger'), name='api_swagger'),
]
| [
"[email protected]"
] | |
6b5349d39fd002718d738dbc82a3a4e7a56d2951 | f614e8567f9458e298c651d0be166da9fc72b4bf | /Django/Solutions/Library + guide/book_app/admin.py | f9c0e4512a43793359d50e572636f53a7f0fbdcf | [] | no_license | PdxCodeGuild/class_Binary_Beasts | 458c5be00b7bce3bb4ac9b7ab485c47f72be4294 | b1298cb5d74513873f82be4ed37676f8b0de93dd | refs/heads/master | 2023-06-28T07:05:21.703491 | 2021-07-29T03:44:09 | 2021-07-29T03:44:09 | 344,980,863 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from django.contrib import admin
from .models import Author, Book, LandBook
# Register your models here.
admin.site.register(Author)
admin.site.register(Book)
admin.site.register(LandBook) | [
"[email protected]"
] | |
c4c57f37ed4a49d9eb574c0bd04b37b0ad5aa68b | 747f759311d404af31c0f80029e88098193f6269 | /addons/pxgo_bank_statement_analytic/__openerp__.py | bc4a6165e0019bfd1aa45c93a4a573e041b856ba | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/openerp/production/extra-addons/pxgo_bank_statement_analytic/__openerp__.py | [
"[email protected]"
] | |
4d09d8081569fd51a578fd6ce0ad57f163f3fc80 | 044bb7ac47cfc1a6dc685e81637d6049e5cee452 | /backend_deploy_0330_21140/urls.py | 4e516b775d26f86e79fef19a8ad7ccb1d5a5c7d4 | [] | no_license | crowdbotics-apps/backend-deploy-0330-21140 | c1034bad2df5d212efd4b0d372a96af08e007444 | 57a5e272767f317d5072a6287bf86e80a321d2f5 | refs/heads/master | 2023-03-28T12:21:00.269768 | 2021-03-30T16:53:08 | 2021-03-30T16:53:08 | 353,063,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | """backend_deploy_0330_21140 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("home/", include("home.urls")),
path("api/v1/", include("store.api.v1.urls")),
path("store/", include("store.urls")),
path("api/v1/", include("users.api.v1.urls")),
]
admin.site.site_header = "Backend Deploy 0330"
admin.site.site_title = "Backend Deploy 0330 Admin Portal"
admin.site.index_title = "Backend Deploy 0330 Admin"
# swagger
api_info = openapi.Info(
title="Backend Deploy 0330 API",
default_version="v1",
description="API documentation for Backend Deploy 0330 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
db8d4d672410d929a4d61b58127f7fd8a6612656 | aa1352a2f32c0c36194d3a6f8e683adba487a3eb | /FiRoom_backend/tryon/migrations/0002_userbodyshot.py | bef0d540635aef2bce342031f71eb08313e492d7 | [] | no_license | Ace-bb/FiRoom_backend | 6c98d01c40e8de31ccbe86beaeada6c62516705e | efd4d9c1d7265e42f56638d5374a569a146acc03 | refs/heads/main | 2023-03-30T15:48:21.376390 | 2021-03-23T15:53:48 | 2021-03-23T15:53:48 | 338,780,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Generated by Django 3.1.4 on 2021-03-14 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tryon', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='userBodyShot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userId', models.IntegerField()),
('userName', models.CharField(max_length=64)),
('shot', models.CharField(max_length=1024)),
],
),
]
| [
"[email protected]"
] | |
4e6beec5dd0e85ffc2488e15652415b40cee11c3 | 23130cd12e38dbce8db8102810edaad70b240ae2 | /lintcode/594.py | a81e22c00509f4292f1898da5963ca42968389f4 | [
"MIT"
] | permissive | kangli-bionic/algorithm | ee6687c82101088db20f10fb958b4e45e97d3d31 | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | refs/heads/master | 2023-01-05T09:29:33.204253 | 2020-10-25T17:29:38 | 2020-10-25T17:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | """
Rabin-Karp:
abcdefgh, find string "def"
We only need to compute the hash of "def" once, which takes O(k).
abcdefgh
 ---
abcdefgh
  ---
We then check one k-length window hash at a time; each window hash is derived
from the previous one in O(1):
    hash(bcd) = hash(abc) * 31 + d - 31^3 * a    (window size k = 3)
Checking all window hashes against the target hash takes O(n).
For the rare cases where hashes collide, compare letter by letter in O(k).
Total time: O(n + k).
"""
class Solution:
"""
@param: source: A source string
@param: target: A target string
@return: An integer as index
"""
def strStr2(self, source, target):
# write your code here
if source is None or target is None:
return -1
if not target:
return 0
if not source:
return -1
BASE = 2000000
k = len(target) #3
highest_power = 1
for i in range(len(target)):
highest_power = (highest_power * 31) % BASE
        # highest_power = 31^k (mod BASE) = 31*31*...*31, the weight of the
        # character that falls out of the window; e.g. hash("abc") = a*31^2 + b*31 + c.
hash_target = 0
for i in range(len(target)):
hash_target = (hash_target * 31 + ord(target[i])) % BASE
hash_code = 0
        # Slide a window of length k over the source; e.g. for source
        # "abcdefg" (n = 7) and k = 3 the last window is "efg".
for i in range(len(source)):
#add next char
hash_code = (hash_code * 31 + ord(source[i])) % BASE
            # hash_code now covers source[0..i], e.g. a*31^3 + b*31^2 + c*31 + d
            # for "abcd"; the leading term is removed below once i >= k.
if i >= k:
hash_code = (hash_code - highest_power * ord(source[i - k])) % BASE
if hash_code < 0:
hash_code += BASE
#match
if hash_code == hash_target:
match = True
for j in range(len(target)):
if source[i - k + 1 + j] != target[j]:
match = False
break
if match:
return i - k + 1
return -1
s = Solution()
source = ""
target = ""
print(s.strStr2(source, target))
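# A couple of sanity checks (illustrative):
assert Solution().strStr2("abcdefgh", "def") == 3
assert Solution().strStr2("abcd", "xy") == -1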
| [
"[email protected]"
] | |
41b8ba509c04dd91d9f7df54a9af6e6ac7a56c52 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /izfXy5SGfeekmKExH_15.py | dd6d5ebcb9a300d2e25edcd9ddd6522907132499 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | """
Write a function that takes two lists and adds the first element in the first
list with the first element in the second list, the second element in the
first list with the second element in the second list, etc, etc. Return `True`
if all element combinations add up to the same number. Otherwise, return
`False`.
### Examples
puzzle_pieces([1, 2, 3, 4], [4, 3, 2, 1]) ➞ True
# 1 + 4 = 5; 2 + 3 = 5; 3 + 2 = 5; 4 + 1 = 5
# Both lists sum to [5, 5, 5, 5]
puzzle_pieces([1, 8, 5, 0, -1, 7], [0, -7, -4, 1, 2, -6]) ➞ True
puzzle_pieces([1, 2], [-1, -1]) ➞ False
puzzle_pieces([9, 8, 7], [7, 8, 9, 10]) ➞ False
### Notes
* Each list will have at least one element.
* Return `False` if both lists are of different length.
"""
def puzzle_pieces(a1, a2):
    return len(a1) == len(a2) and len(set(map(lambda x, y: x + y, a1, a2))) == 1
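# Sanity checks taken from the examples above:
assert puzzle_pieces([1, 2, 3, 4], [4, 3, 2, 1])
assert not puzzle_pieces([1, 2], [-1, -1])
assert not puzzle_pieces([9, 8, 7], [7, 8, 9, 10])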
| [
"[email protected]"
] | |
4c5f13f15969a69343f4b1d22fc01fd48532443f | 25d641a55eb868cc74fd4e3e4daea43f6deb5853 | /3 - Web UI & API/virtual/Lib/site-packages/pip/_internal/req/_vti_cnf/__init__.py | 47fccc4e6889f48b611d2cacf286d35b83631172 | [] | no_license | greens1/Final-Year-Project | 370b7ef9979049cfc75e6776da24c7a286848b71 | 2e72f43893595deef6aa5323773a6161f2cd873a | refs/heads/master | 2022-11-13T19:55:17.884414 | 2018-05-17T22:15:14 | 2018-05-17T22:15:14 | 133,852,365 | 1 | 0 | null | 2022-11-01T20:11:11 | 2018-05-17T18:21:16 | Python | UTF-8 | Python | false | false | 193 | py | vti_encoding:SR|utf8-nl
vti_timelastmodified:TR|15 Apr 2018 11:07:36 -0000
vti_extenderversion:SR|12.0.0.0
vti_cacheddtm:TX|15 Apr 2018 11:07:36 -0000
vti_filesize:IR|2152
vti_backlinkinfo:VX|
| [
"[email protected]"
] | |
0f261603cefc73452c72b9da92ee340e11c55b14 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/rat_j0455+1305/sdB_RAT_J0455+1305_lc.py | ffb25211b57623347ad503f12006e96baf0c0b4a | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[73.813417,13.091611], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_RAT_J0455+1305 /sdB_RAT_J0455+1305_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6539b59094b60080421eff95fd512b2b1b2ed89b | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/lobby/telecom_rentals/__init__.py | f6ea7400ff249f307dd982a1c74bf9bf2f1aaa75 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 152 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/telecom_rentals/__init__.py
pass
| [
"[email protected]"
] | |
11cdf608475191afaab295a70f80e4b59a17acab | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/utils/subclasses.py | ea06cf5d380d6472e2c4708b7ea47ad84d4290ed | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] | permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 855 | py | # Copyright (C) 2021, 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def get_all_subclasses(cls):
for subclass in cls.__subclasses__():
yield from get_all_subclasses(subclass)
yield subclass
| [
"[email protected]"
] | |
3fbe12b6c3d9e9312e0eb977fbb0043690062f4d | 9c85d132b2ed8c51f021f42ed9f20652827bca45 | /source/res/scripts/client/gui/Scaleform/daapi/view/lobby/vehicle_compare/cmp_configurator_view.py | 8189adf02685d7a82b14defc4eeb0491950b6b8c | [] | no_license | Mododejl/WorldOfTanks-Decompiled | 0f4063150c7148184644768b55a9104647f7e098 | cab1b318a58db1e428811c41efc3af694906ba8f | refs/heads/master | 2020-03-26T18:08:59.843847 | 2018-06-12T05:40:05 | 2018-06-12T05:40:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,803 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/vehicle_compare/cmp_configurator_view.py
from collections import defaultdict
from adisp import process
from debug_utils import LOG_WARNING, LOG_DEBUG, LOG_ERROR
from gui.Scaleform.daapi import LobbySubView
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.hangar.AmmunitionPanel import getFittingSlotsData, getAmmo, VEHICLE_FITTING_SLOTS, ARTEFACTS_SLOTS, FITTING_MODULES
from gui.Scaleform.daapi.view.lobby.shared.fitting_slot_vo import FittingSlotVO
from gui.Scaleform.daapi.view.lobby.vehicle_compare import cmp_helpers
from gui.Scaleform.daapi.view.lobby.vehicle_compare.cmp_configurator_base import VehicleCompareConfiguratorBaseView
from gui.Scaleform.daapi.view.lobby.vehicle_compare.cmp_configurator_parameters import VehicleCompareParameters
from gui.Scaleform.daapi.view.lobby.vehicle_compare.cmp_helpers import getSuitableCamouflage
from gui.Scaleform.daapi.view.meta.VehicleCompareConfiguratorMainMeta import VehicleCompareConfiguratorMainMeta
from gui.Scaleform.daapi.view.meta.VehicleCompareConfiguratorViewMeta import VehicleCompareConfiguratorViewMeta
from gui.Scaleform.framework import g_entitiesFactories
from gui.Scaleform.genConsts.FITTING_TYPES import FITTING_TYPES
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.genConsts.VEHICLE_COMPARE_CONSTANTS import VEHICLE_COMPARE_CONSTANTS
from gui.Scaleform.genConsts.SLOT_HIGHLIGHT_TYPES import SLOT_HIGHLIGHT_TYPES
from gui.Scaleform.locale.VEH_COMPARE import VEH_COMPARE
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.game_control.veh_comparison_basket import CREW_TYPES, PARAMS_AFFECTED_TANKMEN_SKILLS
from gui.shared.event_bus import EVENT_BUS_SCOPE
from gui.shared.formatters import text_styles
from gui.shared.gui_items import Tankman, GUI_ITEM_TYPE
from gui.shared.gui_items.Vehicle import Vehicle
from gui.shared.gui_items.items_actions.actions import processMsg
from gui.shared.gui_items.processors import module as installer_module
from gui.shared.gui_items.processors.module import ModuleProcessor
from gui.shared.utils.functions import makeTooltip
from helpers import dependency
from helpers.i18n import makeString as _ms
from items import tankmen
from items.vehicles import NUM_OPTIONAL_DEVICE_SLOTS
from shared_utils import findFirst
from skeletons.gui.game_control import IVehicleComparisonBasket
from skeletons.gui.shared import IItemsCache
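# Installs the given modules on the vehicle for preview purposes. When a module
# cannot be fitted because of a dependency ('need turret', 'need gun', 'too heavy',
# 'too heavy chassis'), the list is reordered so the required module is installed
# first and the set is retried recursively; unresolved reasons go into notFitted.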
@process
def _installModulesSet(vehicle, modules, notFitted):
UNDEFINED_INDEX = -1
def __findModuleIndex(mList, moduleTypeID):
for i in xrange(len(mList)):
if mList[i].itemTypeID == moduleTypeID:
return i
return UNDEFINED_INDEX
def __updateVehicleModule(vehicle, module):
typeID = module.itemTypeID
if typeID == GUI_ITEM_TYPE.CHASSIS:
vehicle.chassis = module
elif typeID == GUI_ITEM_TYPE.ENGINE:
vehicle.engine = module
elif typeID == GUI_ITEM_TYPE.RADIO:
vehicle.radio = module
elif typeID == GUI_ITEM_TYPE.TURRET:
vehicle.turret = module
elif typeID == GUI_ITEM_TYPE.GUN:
vehicle.gun = module
if modules:
while modules:
module = modules.pop()
isFit, notFitReason = module.mayInstall(vehicle)
if isFit:
yield installer_module.getPreviewInstallerProcessor(vehicle, module).request()
__updateVehicleModule(vehicle, module)
if notFitReason == 'need turret':
turretIndex = __findModuleIndex(modules, GUI_ITEM_TYPE.TURRET)
if turretIndex != UNDEFINED_INDEX:
modules.append(module)
modules.append(modules.pop(turretIndex))
_installModulesSet(vehicle, modules, notFitted)
break
else:
notFitted.append(notFitReason)
if notFitReason == 'need gun':
gunIndex = __findModuleIndex(modules, GUI_ITEM_TYPE.GUN)
if gunIndex != UNDEFINED_INDEX:
modules.append(module)
modules.append(modules.pop(gunIndex))
_installModulesSet(vehicle, modules, notFitted)
break
else:
notFitted.append(notFitReason)
if notFitReason == 'too heavy':
chassisIndex = __findModuleIndex(modules, GUI_ITEM_TYPE.CHASSIS)
if chassisIndex != UNDEFINED_INDEX:
modules.append(module)
modules.append(modules.pop(chassisIndex))
_installModulesSet(vehicle, modules, notFitted)
break
else:
notFitted.append(notFitReason)
if notFitReason == 'too heavy chassis':
modules.insert(0, module)
_installModulesSet(vehicle, modules, notFitted)
break
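# Precomputes which positions in the flat slot-data list belong to each fitting
# slot type; artefact slots (optional devices/equipment) span several consecutive
# indexes, other slot types occupy a single index.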
def _getSlotDataIndexes(slots):
index = 0
indexes = []
for slot in slots:
if slot in ARTEFACTS_SLOTS:
indexes.append(range(index, index + NUM_OPTIONAL_DEVICE_SLOTS))
index += NUM_OPTIONAL_DEVICE_SLOTS
indexes.append((index,))
index += 1
return indexes
_SLOT_DATA_INDEXES = _getSlotDataIndexes(VEHICLE_FITTING_SLOTS)
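# ModuleProcessor that removes an optional device from the compared vehicle in
# place, validating the removal against the descriptor and mapping the
# 'too heavy' failure to a user-readable error key.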
class _CmpOptDeviceRemover(ModuleProcessor):
def __init__(self, vehicle, item, slotIndex, plugs=tuple()):
super(_CmpOptDeviceRemover, self).__init__(item, 'remove', plugs)
self.__vehicle = vehicle
self.__slotIndex = slotIndex
def _request(self, callback):
mayRemove, reason = self.__vehicle.descriptor.mayRemoveOptionalDevice(self.__slotIndex)
if mayRemove:
self.__vehicle.descriptor.removeOptionalDevice(self.__slotIndex)
self.__vehicle.optDevices[self.__slotIndex] = None
super(_CmpOptDeviceRemover, self)._request(callback)
else:
callback(self._errorHandler(0, reason))
return
def _errorHandler(self, code, errStr='', ctx=None):
if errStr == 'too heavy':
errStr = 'error_too_heavy'
return super(_CmpOptDeviceRemover, self)._errorHandler(code, errStr, ctx)
def _getMsgCtx(self):
return {'name': self.item.userName,
'kind': self.item.userType,
'money': 0}
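# Fitting-slot VO flavoured for the comparison configurator: shows a remove button
# for occupied slots, dedicated tooltips for empty ones, and highlight/tooltip
# types for optional devices and battle boosters.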
class _ConfigFittingSlotVO(FittingSlotVO):
def __init__(self, modulesData, vehicle, slotType, slotId=None, tooltipType=None):
super(_ConfigFittingSlotVO, self).__init__(modulesData, vehicle, slotType, slotId, tooltipType)
slotEmpty = self['id'] == -1
self['showRemoveBtn'] = not slotEmpty
if slotEmpty:
self['tooltipType'] = TOOLTIPS_CONSTANTS.COMPLEX
if slotType == 'equipment':
self['tooltip'] = VEH_COMPARE.VEHCONF_TOOLTIPS_EMPTYEQSLOT
elif slotType == 'battleBooster':
self['tooltip'] = TOOLTIPS.HANGAR_AMMO_PANEL_BATTLEBOOSTER_EMPTY
else:
self['tooltip'] = VEH_COMPARE.VEHCONF_TOOLTIPS_EMPTYOPTDEVICESLOT
else:
if slotType == FITTING_TYPES.VEHICLE_TURRET and not vehicle.hasTurrets:
self['tooltipType'] = ''
if slotType == FITTING_TYPES.OPTIONAL_DEVICE:
optDev = findFirst(lambda item: item.isInstalled(vehicle, slotId), modulesData)
if optDev is not None and optDev.isDeluxe():
self['bgHighlightType'] = SLOT_HIGHLIGHT_TYPES.EQUIPMENT_PLUS
else:
self['bgHighlightType'] = SLOT_HIGHLIGHT_TYPES.NO_HIGHLIGHT
self['tooltipType'] = TOOLTIPS_CONSTANTS.COMPARE_MODULE
elif slotType == FITTING_TYPES.BOOSTER:
self['tooltipType'] = TOOLTIPS_CONSTANTS.BATTLE_BOOSTER_COMPARE
else:
self['tooltipType'] = TOOLTIPS_CONSTANTS.COMPARE_MODULE
return
def _prepareModule(self, modulesData, vehicle, slotType, slotId):
if slotType == FITTING_TYPES.BOOSTER:
vehicleModule = vehicle.equipment.battleBoosterConsumables[0]
if vehicleModule is not None:
affectsAtTTC = vehicleModule.isAffectsOnVehicle(vehicle)
self['affectsAtTTC'] = affectsAtTTC
if affectsAtTTC:
if vehicleModule.isCrewBooster():
isPerkReplace = not vehicleModule.isAffectedSkillLearnt(vehicle)
bgType = SLOT_HIGHLIGHT_TYPES.BATTLE_BOOSTER_CREW_REPLACE if isPerkReplace else SLOT_HIGHLIGHT_TYPES.BATTLE_BOOSTER
self['bgHighlightType'] = bgType
else:
self['highlight'] = affectsAtTTC
self['bgHighlightType'] = SLOT_HIGHLIGHT_TYPES.BATTLE_BOOSTER
else:
vehicleModule = super(_ConfigFittingSlotVO, self)._prepareModule(modulesData, vehicle, slotType, slotId)
if slotType == FITTING_TYPES.OPTIONAL_DEVICE:
moduleInSlot = findFirst(lambda item: item.isInstalled(vehicle, slotId), modulesData)
for battleBooster in vehicle.equipment.battleBoosterConsumables:
if battleBooster is not None and battleBooster.isOptionalDeviceCompatible(moduleInSlot):
self['highlight'] = True
break
return vehicleModule
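# Skill-completeness checkers: the default treats a skill as complete when every
# trained instance is at MAX_SKILL_LEVEL; the full-crew variant additionally
# requires every crew member to have the skill.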
class _DefaultSkillCompletenessChecker(object):
def isCompleted(self, levels, crew):
for lvl in levels:
if lvl < tankmen.MAX_SKILL_LEVEL:
return False
return True
class _FullCrewSkillsCompletenessChecker(_DefaultSkillCompletenessChecker):
def isCompleted(self, levels, crew):
isAllSkillsAre100 = super(_FullCrewSkillsCompletenessChecker, self).isCompleted(levels, crew)
return len(levels) == len(crew) if isAllSkillsAre100 else False
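# Detects parameter-affecting skills that the vehicle's actual (inventory) crew has
# not fully trained; drives the crew attention icon when such skills are selected
# while the 'current crew' level is active.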
class _CurrentCrewMonitor(object):
_DEF_SKILL_CHECKER = _DefaultSkillCompletenessChecker()
_FULL_CREW_SKILL_CHECKER = _FullCrewSkillsCompletenessChecker()
itemsCache = dependency.descriptor(IItemsCache)
def __init__(self, container):
super(_CurrentCrewMonitor, self).__init__()
self.__container = container
self.__increasedTo100Skills = set()
skillsCheckerStorage = defaultdict(lambda : self._DEF_SKILL_CHECKER)
skillsCheckerStorage[PARAMS_AFFECTED_TANKMEN_SKILLS[0]] = self._FULL_CREW_SKILL_CHECKER
skillsCheckerStorage[PARAMS_AFFECTED_TANKMEN_SKILLS[1]] = self._FULL_CREW_SKILL_CHECKER
vehicleCrew = self.itemsCache.items.getItemByCD(self.__container.getCurrentVehicle().intCD).crew
levelsBySkills = defaultdict(list)
for _, tankman in vehicleCrew:
if tankman is not None:
for skill in tankman.skills:
if skill.name in PARAMS_AFFECTED_TANKMEN_SKILLS:
levelsBySkills[skill.name].append(skill.level)
for skillName, levels in levelsBySkills.iteritems():
if not skillsCheckerStorage[skillName].isCompleted(levels, vehicleCrew):
self.__increasedTo100Skills.add(skillName)
return
def isIncreasedSkillsSelected(self):
if self.__container.getCurrentCrewSkillLevel() != CREW_TYPES.CURRENT:
return False
return True if self.__container.getCurrentCrewSkills().intersection(self.__increasedTo100Skills) else False
def dispose(self):
self.__container = None
return
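# Manages the virtual crew of the compared vehicle: toggles parameter-affecting
# skills, rebuilds the crew for the chosen skill level (50/75/100/current) and
# keeps the role<->skill mappings needed to decide which tankmen a change touches.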
class _CrewSkillsManager(object):
def __init__(self, vehicle, crewSkillLevel, selectedSkills):
super(_CrewSkillsManager, self).__init__()
self.__vehicle = vehicle
self.__crewSkillLevel = None
self.__selectedSkills = selectedSkills
crewSkills = cmp_helpers.getVehicleCrewSkills(self.__vehicle)
self.__rolesBySkills = defaultdict(set)
self.__skillsByRoles = {}
for idx, (role, skillsSet) in enumerate(crewSkills):
for skill in skillsSet:
self.__rolesBySkills[skill].add((idx, role))
self.__skillsByRoles[role] = skillsSet
self.changeCrewSkillLevel(crewSkillLevel)
return
def toggleSkill(self, skillName):
if skillName not in PARAMS_AFFECTED_TANKMEN_SKILLS:
LOG_WARNING('Attempt to set skill not affected on the vehicle parameters: {}'.format(skillName))
return False
if self.__crewSkillLevel != CREW_TYPES.SKILL_100 and self.__crewSkillLevel != CREW_TYPES.CURRENT:
LOG_WARNING('It is forbidden to set skill for {}% level crew!'.format(self.__crewSkillLevel))
return False
if skillName not in self.__selectedSkills:
self.__selectedSkills.add(skillName)
else:
self.__selectedSkills.remove(skillName)
return self._applyTankmanSkill(self.__vehicle, self.__getAffectedTankmens((skillName,)), self.__skillsByRoles, self.__selectedSkills)
def applySkillForTheSameVehicle(self, vehicle, skillName):
if vehicle.intCD != self.__vehicle.intCD:
LOG_DEBUG('Target vehicle is not the same as current vehicle! Expected {}, received {}'.format(self.__vehicle.intCD, vehicle.intCD))
return False
if skillName not in PARAMS_AFFECTED_TANKMEN_SKILLS:
LOG_WARNING('Attempt to set skill not affected on the vehicle parameters: {}'.format(skillName))
return False
selectedSkills = self.__selectedSkills.copy()
selectedSkills.add(skillName)
return self._applyTankmanSkill(vehicle, self.__getAffectedTankmens((skillName,)), self.__skillsByRoles, selectedSkills)
def changeCrewSkillLevel(self, newSkillsLevel):
success = False
if self.__crewSkillLevel != newSkillsLevel:
self.__crewSkillLevel = newSkillsLevel
skillsByRoles = {}
if self.__crewSkillLevel == CREW_TYPES.SKILL_100 or self.__crewSkillLevel == CREW_TYPES.CURRENT:
affectedTankmens = self.__getAffectedTankmens(self.__selectedSkills)
for idx, role in affectedTankmens:
skillsByRoles[idx] = self.__skillsByRoles[role].intersection(self.__selectedSkills)
if self.__crewSkillLevel == CREW_TYPES.CURRENT:
levelsByIndexes, nativeVehiclesByIndexes = cmp_helpers.getVehCrewInfo(self.__vehicle.intCD)
defRoleLevel = None
else:
nativeVehiclesByIndexes = None
levelsByIndexes = None
defRoleLevel = self.__crewSkillLevel
self.__vehicle.crew = self.__vehicle.getCrewBySkillLevels(defRoleLevel, skillsByRoles, levelsByIndexes, nativeVehiclesByIndexes)
success = True
return success
def getSelectedSkills(self):
return self.__selectedSkills.copy()
def getCrewSkillLevel(self):
return self.__crewSkillLevel
def dispose(self):
self.__vehicle = None
return
@classmethod
def _applyTankmanSkill(cls, vehicle, affectedTankmens, skillsByRoles, selectedSkills):
nationID, vehicleTypeID = vehicle.descriptor.type.id
success = False
for roleIdx, role in affectedTankmens:
skills = skillsByRoles[role]
veh_crew = vehicle.crew
for idx, (vehCrewRoleIdx, vehCrewRole) in enumerate(veh_crew):
if vehCrewRoleIdx == roleIdx:
prevRoleLevel = vehCrewRole.roleLevel if vehCrewRole is not None else tankmen.MAX_SKILL_LEVEL
veh_crew[idx] = (roleIdx, cmp_helpers.createTankman(nationID, vehicleTypeID, role, prevRoleLevel, skills.intersection(selectedSkills)))
success = True
break
else:
                LOG_WARNING('Tankman with role index {} has not been found'.format(roleIdx))
return success
def __getAffectedTankmens(self, skills):
tankmens = set()
for skill in skills:
tankmens |= self.__rolesBySkills[skill]
return tankmens
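# Flash-backed configurator sub-view: renders fitting slots, ammo, camouflage,
# crew level and skills for the vehicle being tuned, and forwards every user
# action to its container.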
class VehicleCompareConfiguratorView(LobbySubView, VehicleCompareConfiguratorViewMeta):
itemsCache = dependency.descriptor(IItemsCache)
def __init__(self, ctx=None):
self.__parametersView = None
super(VehicleCompareConfiguratorView, self).__init__(ctx)
self.__slotsVoData = [None] * (_SLOT_DATA_INDEXES[-1][-1] + 1)
self.__currentCrewMonitor = None
return
def onCloseView(self):
self._container.closeView()
def onCamouflageUpdated(self):
self.as_setCamoS(self._container.isCamouflageSet())
self.__updateParametersView()
def onOptDeviceUpdated(self):
self.__updateOptDevicesData()
def onEquipmentUpdated(self):
self.__updateEquipmentData()
def onBattleBoosterUpdated(self):
self.__updateBattleBoosterData()
def onModulesUpdated(self):
self.__updateSlotsData(FITTING_MODULES)
self.__updateParametersView()
self.as_setTopModulesSelectedS(self._container.isTopModulesSelected())
def onCrewSkillUpdated(self):
self.__updateParametersView()
self.__updateCrewAttentionIcon()
self.__updateBattleBoosterData()
def onCrewLevelUpdated(self, newLvl):
self.__updateParametersView()
self.__updateCrewSelectionAvailability(newLvl)
self.__updateCrewAttentionIcon()
def onResetToDefault(self):
self.__updateSkillsData()
self.__parametersView.init(self._container.getCurrentVehicle())
self.__updateSlotsData(VEHICLE_FITTING_SLOTS)
self.as_setTopModulesSelectedS(self._container.isTopModulesSelected())
self.__updateCrewLvl()
self.__updateCrewAttentionIcon()
def onShellsUpdated(self, updateShells=False, selectedIndex=-1):
if selectedIndex != -1:
self.as_setSelectedAmmoIndexS(selectedIndex)
self.__updateParametersView()
if updateShells:
self.__updateShellSlots()
self.__updateControlBtns()
def resetConfig(self):
self._container.resetToDefault()
def applyConfig(self):
self._container.applyNewParameters()
self.onCloseView()
def selectShell(self, shellId, slotIndex):
self._container.selectShell(slotIndex)
def camoSelected(self, selected):
self._container.selectCamouflage(selected)
def installDevice(self, newId, slotType, slotIndex):
if slotType == cmp_helpers.OPTIONAL_DEVICE_TYPE_NAME:
self._container.installOptionalDevice(newId, slotIndex)
elif slotType == cmp_helpers.EQUIPMENT_TYPE_NAME:
self._container.installEquipment(newId, slotIndex)
else:
LOG_ERROR('{} installDevice. Unsupported slotType: {}'.format(self, slotType))
def removeDevice(self, slotType, slotIndex):
if slotType == cmp_helpers.OPTIONAL_DEVICE_TYPE_NAME:
self._container.removeOptionalDevice(slotIndex)
elif slotType == cmp_helpers.EQUIPMENT_TYPE_NAME:
self._container.removeEquipment(slotIndex)
elif slotType == cmp_helpers.BATTLE_BOOSTER_TYPE_NAME:
self._container.removeBattleBooster()
else:
LOG_ERROR('{} removeDevice. Unsupported slotType: {}'.format(self, slotType))
def toggleTopModules(self, value):
self._container.selectTopModules(value)
def showModules(self):
self._container.as_showViewS(VEHICLE_COMPARE_CONSTANTS.VEHICLE_MODULES_VIEW)
def skillSelect(self, skillType, slotIndex, selected):
self._container.selectCrewSkill(skillType, selected)
def changeCrewLevel(self, crewLevelId):
self._container.selectCrewLevel(crewLevelId)
def _onRegisterFlashComponent(self, viewPy, alias):
super(VehicleCompareConfiguratorView, self)._onRegisterFlashComponent(viewPy, alias)
if isinstance(viewPy, VehicleCompareParameters):
self.__parametersView = viewPy
def _init(self):
super(VehicleCompareConfiguratorView, self)._init()
currentVehicle = self._container.getCurrentVehicle()
enableCamo = bool(getSuitableCamouflage(currentVehicle))
topModulesFromStock = self._container.isTopModulesFromStock()
enableTopModules = not (currentVehicle.isPremium or topModulesFromStock)
isInInventory = self._container.getBasketVehCmpData().isInInventory()
if isInInventory:
self.__currentCrewMonitor = _CurrentCrewMonitor(self._container)
self.as_setInitDataS({'title': text_styles.promoSubTitle(_ms(VEH_COMPARE.VEHCONF_HEADER, vehName=currentVehicle.userName)),
'resetBtnLabel': VEH_COMPARE.VEHCONF_RESETBTNLABEL,
'cancelBtnLabel': VEH_COMPARE.VEHCONF_CANCELBTNLABEL,
'applyBtnLabel': VEH_COMPARE.VEHCONF_APPLYBTNLABEL,
'resetBtnTooltip': VEH_COMPARE.VEHCONF_RESETBTNLABEL_TOOLTIP,
'cancelBtnTooltip': VEH_COMPARE.VEHCONF_CANCELBTNLABEL_TOOLTIP,
'applyBtnTooltip': VEH_COMPARE.VEHCONF_COMPAREBTNLABEL_TOOLTIP,
'crewLevels': self.__getCrewLevels(isInInventory),
'enableTopModules': enableTopModules,
'enableCamo': enableCamo})
self.__updateCrewLvl()
self.__updateShellSlots()
self.as_setSelectedAmmoIndexS(self._container.getCurrentShellIndex())
self.as_setCamoS(self._container.isCamouflageSet())
if currentVehicle.descriptor.type.hasCustomDefaultCamouflage:
self.as_disableCamoS()
self.__updateControlBtns()
topModulesSelected = topModulesFromStock or self._container.isTopModulesSelected()
self.as_setTopModulesSelectedS(topModulesSelected)
self.__updateCrewAttentionIcon()
self.__updateSkillsData()
self.__updateSlotsData(VEHICLE_FITTING_SLOTS)
initialVehicle, _ = self._container.getInitialVehicleData()
self.__parametersView.init(currentVehicle, initialVehicle)
def _dispose(self):
if self.__currentCrewMonitor:
self.__currentCrewMonitor.dispose()
self.__parametersView = None
super(VehicleCompareConfiguratorView, self)._dispose()
return
def __updateOptDevicesData(self):
self.__updateSlotsData((cmp_helpers.OPTIONAL_DEVICE_TYPE_NAME,))
def __updateEquipmentData(self):
self.__updateSlotsData((cmp_helpers.EQUIPMENT_TYPE_NAME,))
def __updateBattleBoosterData(self):
self.__updateSlotsData((cmp_helpers.BATTLE_BOOSTER_TYPE_NAME, cmp_helpers.OPTIONAL_DEVICE_TYPE_NAME))
def __updateSlotsData(self, slotsTypes):
newVoData = getFittingSlotsData(self._container.getCurrentVehicle(), slotsTypes, _ConfigFittingSlotVO)
for slotType in slotsTypes:
indexesRange = _SLOT_DATA_INDEXES[VEHICLE_FITTING_SLOTS.index(slotType)]
for idx in indexesRange:
newSlotData = newVoData.pop(0)
slotDataID = newSlotData.get('id', 0)
if slotDataID > 0:
moduleItem = self.itemsCache.items.getItemByCD(slotDataID)
itemTypeID = moduleItem.itemTypeID
itemName = moduleItem.name
if itemTypeID == GUI_ITEM_TYPE.EQUIPMENT:
if itemName in cmp_helpers.NOT_AFFECTED_EQUIPMENTS:
newSlotData['affectsAtTTC'] = False
newSlotData['tooltipType'] = 'complex'
newSlotData['tooltip'] = makeTooltip(moduleItem.userName, attention=VEH_COMPARE.VEHCONF_TOOLTIPS_DEVICENOTAFFECTEDTTC)
self.__slotsVoData[idx] = newSlotData
self.as_setDevicesDataS(self.__slotsVoData)
self.__updateParametersView()
def __updateParametersView(self):
if self.__parametersView is not None:
self.__parametersView.update()
self.__updateControlBtns()
return
def __updateCrewSelectionAvailability(self, newLvl):
self.as_setSkillsBlockedS(newLvl != CREW_TYPES.SKILL_100 and newLvl != CREW_TYPES.CURRENT)
def __updateControlBtns(self):
self.as_setResetEnabledS(self._container.isDifferentWithInitialBasketVeh())
self.as_setApplyEnabledS(self._container.isCurrentVehicleModified())
def __updateSkillsData(self):
skills = [ {'icon': Tankman.getSkillBigIconPath(skillType),
'label': Tankman.getSkillUserName(skillType),
'skillType': skillType,
'isForAll': skillType in tankmen.COMMON_SKILLS,
'selected': skillType in self._container.getCurrentCrewSkills()} for skillType in PARAMS_AFFECTED_TANKMEN_SKILLS ]
self.as_setSkillsS(skills)
@staticmethod
def __getCrewLevels(isInHangar):
items = [{'label': VEH_COMPARE.VEHICLECOMPAREVIEW_CREW_SKILL100,
'id': CREW_TYPES.SKILL_100,
'showAlert': False,
'tooltip': None}, {'label': VEH_COMPARE.VEHICLECOMPAREVIEW_CREW_SKILL75,
'id': CREW_TYPES.SKILL_75}, {'label': VEH_COMPARE.VEHICLECOMPAREVIEW_CREW_SKILL50,
'id': CREW_TYPES.SKILL_50}]
if isInHangar:
items.append({'label': VEH_COMPARE.VEHICLECOMPAREVIEW_CREW_CURRENT,
'id': CREW_TYPES.CURRENT})
return items
def __updateCrewLvl(self):
crewLevel = self._container.getCurrentCrewSkillLevel()
self.as_setCrewLevelIndexS(CREW_TYPES.ALL.index(crewLevel))
self.__updateCrewSelectionAvailability(crewLevel)
def __updateShellSlots(self):
getter = self.itemsCache.items.getItemByCD
shells = [ getter(shot.shell.compactDescr) for shot in self._container.getCurrentVehicle().descriptor.gun.shots ]
self.as_setAmmoS(getAmmo(shells))
def __updateCrewAttentionIcon(self):
isVisible = False
if self.__currentCrewMonitor:
isVisible = self.__currentCrewMonitor.isIncreasedSkillsSelected()
self.as_setCrewAttentionIconVisibleS(isVisible)
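# Top-level configurator component: owns the mutable copy of the compared vehicle,
# applies module/equipment/crew/camouflage changes to it and broadcasts update
# events to the registered sub-views.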
class VehicleCompareConfiguratorMain(LobbySubView, VehicleCompareConfiguratorMainMeta):
itemsCache = dependency.descriptor(IItemsCache)
comparisonBasket = dependency.descriptor(IVehicleComparisonBasket)
def __init__(self, ctx=None):
super(VehicleCompareConfiguratorMain, self).__init__(ctx)
self.__vehIndex = ctx.get('index')
if not isinstance(self.__vehIndex, int):
raise UserWarning('Index of vehicle should be integer: {}'.format(self.__vehIndex))
if self.__vehIndex < 0 or self.__vehIndex >= self.comparisonBasket.getVehiclesCount():
raise UserWarning('Index of vehicle out of range: {} not in (0, {})'.format(self.__vehIndex, self.comparisonBasket.getVehiclesCount()))
self.__backAlias = ctx.get('previewAlias', VIEW_ALIAS.VEHICLE_COMPARE)
self.__vehicle = None
self.__crewSkillsManager = None
self.__views = {}
self.__topModules = None
self.__stockModules = None
self.__selectedShellIndex = None
return
def as_showViewS(self, alias):
result = super(VehicleCompareConfiguratorMain, self).as_showViewS(alias)
if alias in self.__views:
self.__views[alias].onShow()
return result
def getCurrentVehicle(self):
return self.__vehicle
def getInitialVehicleData(self):
basketVehicle = self.getBasketVehCmpData()
if basketVehicle.isInInventory():
strCD = basketVehicle.getInvVehStrCD()
crewLvl, crewSkills = basketVehicle.getInventoryCrewData()
equipment = basketVehicle.getInvEquipment()
else:
strCD = basketVehicle.getStockVehStrCD()
crewLvl = basketVehicle.getStockCrewLvl()
crewSkills = basketVehicle.getStockCrewSkills()
equipment = basketVehicle.getStockEquipment()
vehicle = Vehicle(strCD)
for slotIndex in xrange(len(equipment)):
cmp_helpers.installEquipmentOnVehicle(vehicle, equipment[slotIndex], slotIndex)
cmp_helpers.applyCamouflage(vehicle, basketVehicle.invHasCamouflage())
return (vehicle, _CrewSkillsManager(vehicle, crewLvl, crewSkills))
def getCurrentCrewSkills(self):
return self.__crewSkillsManager.getSelectedSkills()
def getCurrentCrewSkillLevel(self):
return self.__crewSkillsManager.getCrewSkillLevel()
def getBasketVehCmpData(self):
return self.comparisonBasket.getVehicleAt(self.__vehIndex)
def getVehicleWithAppliedSkill(self, skillName):
vehicle = self._getCurrentVehicleCopy()
return vehicle if self.__crewSkillsManager.applySkillForTheSameVehicle(vehicle, skillName) else None
def getCurrentShellIndex(self):
return self.__selectedShellIndex
def isTopModulesFromStock(self):
topModules = self.__getTopModules()
stockModules = self.__getStockModules()
return all((bool(item in stockModules) for item in topModules))
def isTopModulesSelected(self):
topModules = self.__getTopModules()
selectedModules = cmp_helpers.getVehicleModules(self.__vehicle)
return all((bool(item in selectedModules) for item in topModules))
def isCamouflageSet(self):
return cmp_helpers.isCamouflageSet(self.__vehicle)
def applyNewParameters(self):
self.comparisonBasket.applyNewParameters(self.__vehIndex, self.__vehicle, self.getCurrentCrewSkillLevel(), self.getCurrentCrewSkills(), self.getCurrentShellIndex())
def isDifferentWithInitialBasketVeh(self):
basketVehicle = self.getBasketVehCmpData()
if basketVehicle.isInInventory():
basketVehCrewLvl, basketVehCrewSkills = basketVehicle.getInventoryCrewData()
equipment = basketVehicle.getInvEquipment()
strCD = basketVehicle.getInvVehStrCD()
else:
basketVehCrewLvl = basketVehicle.getStockCrewLvl()
basketVehCrewSkills = basketVehicle.getStockCrewSkills()
equipment = basketVehicle.getStockEquipment()
strCD = basketVehicle.getStockVehStrCD()
return self.__isHasDifferences(strCD, equipment, basketVehCrewLvl, basketVehCrewSkills, basketVehicle.getInventoryShellIndex(), basketVehicle.invHasCamouflage(), basketVehicle.getBattleBooster())
def isCurrentVehicleModified(self):
basketVehicle = self.getBasketVehCmpData()
crewLvl, crewSkills = basketVehicle.getCrewData()
return self.__isHasDifferences(basketVehicle.getVehicleStrCD(), basketVehicle.getEquipment(), crewLvl, crewSkills, basketVehicle.getSelectedShellIndex(), basketVehicle.hasCamouflage(), basketVehicle.getBattleBooster())
def __isHasDifferences(self, strCD, equipment, basketVehCrewLvl, basketVehCrewSkills, selShellIndex, hasCamouflage, battleBooster):
if basketVehCrewLvl != self.getCurrentCrewSkillLevel():
return True
elif basketVehCrewSkills != self.getCurrentCrewSkills():
return True
elif not cmp_helpers.isEquipmentSame(equipment, self.__vehicle.equipment.regularConsumables.getIntCDs(default=None)):
return True
elif selShellIndex != self.__selectedShellIndex:
return True
else:
currVehHasCamouflage = cmp_helpers.isCamouflageSet(self.__vehicle)
if hasCamouflage != currVehHasCamouflage:
return True
currVehBattleBooster = self.__vehicle.equipment.battleBoosterConsumables[0]
if not battleBooster == currVehBattleBooster:
return True
if currVehHasCamouflage:
targetVehicle = Vehicle(self.__vehicle.descriptor.makeCompactDescr())
cmp_helpers.removeVehicleCamouflages(targetVehicle)
else:
targetVehicle = self.__vehicle
return True if strCD != targetVehicle.descriptor.makeCompactDescr() else False
def setModules(self, modules):
if modules:
notFittedReasons = []
oldGunIntCD = self.__vehicle.gun.intCD
_installModulesSet(self.__vehicle, list(modules[:]), notFittedReasons)
if notFittedReasons:
for notFitReason in notFittedReasons:
LOG_DEBUG('Module has not been installed properly, reason: {}'.format(notFitReason))
if oldGunIntCD != self.__vehicle.gun.intCD:
firstShellIndex = 0
newShellIndex = -1
if self.__updateSelectedShell(firstShellIndex):
newShellIndex = firstShellIndex
self.__notifyViews('onShellsUpdated', updateShells=True, selectedIndex=newShellIndex)
else:
newGunToInstall = findFirst(lambda module: module.itemTypeID == GUI_ITEM_TYPE.GUN, modules, None)
if newGunToInstall is not None:
self.__vehicle.descriptor.activeGunShotIndex = self.__selectedShellIndex
self.__notifyViews('onModulesUpdated')
return
def selectTopModules(self, select):
if select:
modules = self.__getTopModules()
else:
modules = self.__getStockModules()
self.setModules(modules)
def selectShell(self, slotIndex):
if self.__updateSelectedShell(slotIndex):
self.__notifyViews('onShellsUpdated', selectedIndex=slotIndex)
def selectCamouflage(self, select):
cmp_helpers.applyCamouflage(self.__vehicle, select)
self.__notifyViews('onCamouflageUpdated')
def resetToDefault(self):
self.__vehicle, self.__crewSkillsManager = self.getInitialVehicleData()
basketShellIndex = self.getBasketVehCmpData().getInventoryShellIndex()
newShellIndex = -1
if self.__updateSelectedShell(basketShellIndex):
newShellIndex = basketShellIndex
self.__notifyViews('onShellsUpdated', updateShells=True, selectedIndex=newShellIndex)
self.__notifyViews('onResetToDefault')
self.__notifyViews('onCamouflageUpdated')
def installOptionalDevice(self, newId, slotIndex):
newId = int(newId)
optDev = self.itemsCache.items.getItemByCD(newId)
itemTypeID = optDev.itemTypeID
mayInstall, reason = optDev.mayInstall(self.__vehicle, slotIndex)
if mayInstall:
self.__vehicle.descriptor.installOptionalDevice(optDev.intCD, slotIndex)
self.__vehicle.optDevices[slotIndex] = optDev
self.__notifyViews('onOptDeviceUpdated')
self.__notifyViews('onBattleBoosterUpdated')
else:
LOG_WARNING('Component "{}" has not been installed. Reason: {}.'.format(itemTypeID, reason))
def removeOptionalDevice(self, slotIndex):
installedDevice = self.__vehicle.optDevices[slotIndex]
if installedDevice is not None:
self.__launchOptDeviceRemoving(installedDevice, slotIndex)
self.__notifyViews('onBattleBoosterUpdated')
else:
LOG_WARNING("Couldn't remove optional device from slot {} because slot is already empty!".format(slotIndex))
return
def installEquipment(self, newId, slotIndex):
self.__installEquipment(newId, slotIndex)
self.__notifyViews('onEquipmentUpdated')
def removeEquipment(self, slotIndex):
self.__installEquipment(None, slotIndex)
self.__notifyViews('onEquipmentUpdated')
return
def installBattleBooster(self, newId):
self.__installBattleBooster(newId)
self.__notifyViews('onBattleBoosterUpdated')
def removeBattleBooster(self):
self.__installBattleBooster(None)
self.__notifyViews('onBattleBoosterUpdated')
return
def selectCrewSkill(self, skillType, selected):
savedValue = skillType in self.__crewSkillsManager.getSelectedSkills()
if selected != savedValue:
if self.__crewSkillsManager.toggleSkill(skillType):
self.__notifyViews('onCrewSkillUpdated')
else:
LOG_WARNING('Attempt to apply the same value for {} = {}'.format(skillType, selected))
def selectCrewLevel(self, crewLevelId):
if self.__crewSkillsManager.changeCrewSkillLevel(crewLevelId):
self.__notifyViews('onCrewLevelUpdated', crewLevelId)
    def closeView(self, forcedBackAlias=None):
        event = g_entitiesFactories.makeLoadEvent(forcedBackAlias or self.__backAlias)
self.fireEvent(event, scope=EVENT_BUS_SCOPE.LOBBY)
def _getCurrentVehicleCopy(self):
vehicle = Vehicle(strCompactDescr=self.__vehicle.descriptor.makeCompactDescr())
vehicle.crew = self.__vehicle.crew[:]
for i, equipmentIntCD in enumerate(self.__vehicle.equipment.regularConsumables.getIntCDs(default=None)):
cmp_helpers.installEquipmentOnVehicle(vehicle, equipmentIntCD, i)
vehicle.descriptor.activeGunShotIndex = self.__vehicle.descriptor.activeGunShotIndex
return vehicle
def _populate(self):
super(VehicleCompareConfiguratorMain, self)._populate()
self.comparisonBasket.onSwitchChange += self.__onBasketStateChanged
        basketVehicleData = self.getBasketVehCmpData()
        basketVehCrewLvl, basketVehCrewSkills = basketVehicleData.getCrewData()
        self.__vehicle = Vehicle(basketVehicleData.getVehicleStrCD())
        self.__crewSkillsManager = _CrewSkillsManager(self.__vehicle, basketVehCrewLvl, basketVehCrewSkills)
        equipment = basketVehicleData.getEquipment()
        for slotIndex in xrange(len(equipment)):
            self.__installEquipment(equipment[slotIndex], slotIndex)
        cmp_helpers.applyCamouflage(self.__vehicle, basketVehicleData.hasCamouflage())
        battleBooster = basketVehicleData.getBattleBooster()
        if battleBooster is not None:
            cmp_helpers.installBattleBoosterOnVehicle(self.__vehicle, battleBooster.intCD)
        self.__updateSelectedShell(basketVehicleData.getSelectedShellIndex())
self.as_showViewS(VEHICLE_COMPARE_CONSTANTS.VEHICLE_CONFIGURATOR_VIEW)
self.comparisonBasket.isLocked = True
return
def _dispose(self):
self.comparisonBasket.onSwitchChange -= self.__onBasketStateChanged
if self.__crewSkillsManager is not None:
self.__crewSkillsManager.dispose()
self.__crewSkillsManager = None
self.__views = None
self.comparisonBasket.isLocked = False
super(VehicleCompareConfiguratorMain, self)._dispose()
return
def _onRegisterFlashComponent(self, viewPy, alias):
super(VehicleCompareConfiguratorMain, self)._onRegisterFlashComponent(viewPy, alias)
if isinstance(viewPy, VehicleCompareConfiguratorBaseView):
self.__views[alias] = viewPy
viewPy.setContainer(self)
@process
def __launchOptDeviceRemoving(self, installedDevice, slotIndex):
result = yield _CmpOptDeviceRemover(self.__vehicle, installedDevice, slotIndex).request()
if result.success:
self.__notifyViews('onOptDeviceUpdated')
else:
processMsg(result)
def __getTopModules(self):
if self.__topModules is None:
self.__topModules = cmp_helpers.getVehicleTopModules(self.__vehicle)
return self.__topModules
def __getStockModules(self):
if self.__stockModules is None:
self.__stockModules = tuple(reversed(cmp_helpers.getVehicleModules(Vehicle(self.getBasketVehCmpData().getStockVehStrCD()))))
return self.__stockModules
def __installEquipment(self, intCD, slotIndex):
cmp_helpers.installEquipmentOnVehicle(self.__vehicle, intCD, slotIndex)
def __installBattleBooster(self, intCD):
cmp_helpers.installBattleBoosterOnVehicle(self.__vehicle, intCD)
def __updateSelectedShell(self, slotIndex):
slotIndex = int(slotIndex)
if self.__selectedShellIndex != slotIndex:
self.__vehicle.descriptor.activeGunShotIndex = slotIndex
self.__selectedShellIndex = slotIndex
return True
return False
def __onBasketStateChanged(self):
if not self.comparisonBasket.isEnabled():
self.closeView(VIEW_ALIAS.LOBBY_HANGAR)
def __notifyViews(self, event, *args, **kwargs):
for component in self.__views.itervalues():
notifier = getattr(component, event, None)
if notifier and callable(notifier):
notifier(*args, **kwargs)
return
| [
"[email protected]"
] | |
e653e6a03c8f0d3af8e678a8ee480073889dfb04 | 362e68fa033cc42cf9981e5f0c441ef2fb4816e6 | /scripts/pick_reference_otus_through_otu_table.py | 5b07919bad20bc776fd595b77388ecffb039c3d5 | [] | no_license | kartoffelpuffer/qiime | 6d409c058f777be3e17a7130d0902f4d0256795a | eeac244b5553579a8d0b540c31d6202acbc983d3 | refs/heads/master | 2020-12-25T02:39:52.585360 | 2013-04-25T16:59:25 | 2013-04-25T16:59:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!/usr/bin/env python
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.6.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
__status__ = "Development"
print "\nThis script has been renamed pick_closed_reference_otus.py for clarity. For help, call pick_closed_reference_otus.py -h\n" | [
"[email protected]"
] |