blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3-616) | content_id (string, length 40) | detected_licenses (sequence, length 0-112) | license_type (2 classes) | repo_name (string, length 5-115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-10.2M) | extension (188 classes) | content (string, length 3-10.2M) | authors (sequence, length 1) | author_id (string, length 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c12bc6fa7adfc6eae327069b0170b1dad10e7206 | 9ff1058a0500be499fd3de9ec0beccd697d5273c | /DWIProcessing/Preprocessing/PNC/preprocPNC_v1.py | 1ad2d145b43118a7419a6f7f7407225200cd6450 | [] | no_license | jrussell9000/NeuroScripts | 93f53c7d38c1d51fdc0cf39096e0996daee887cf | e41558754bd36385f94934333cb39a6500abfd9f | refs/heads/master | 2021-06-09T20:30:59.956137 | 2021-04-08T18:45:39 | 2021-04-08T18:45:39 | 151,635,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,908 | py | #!/usr/bin/env python3
# coding: utf-8
import os
import shutil
import subprocess
from joblib import parallel_backend, delayed, Parallel
from pathlib import Path
#####################################
# ----PREPROCESSING PARAMETERS---- #
#####################################
# --Change as needed - last set for BRC YouthPTSD
bidsmaster_dir = Path('/fast_scratch/jdr/PNC/BIDS_Master/')
bidspreproc_dir = Path('/fast_scratch/jdr/BIDS_Preprocessing/')
bidsproc_dir = Path('/fast_scratch/jdr/BIDS_Processed')
# slspec = Path('/Users/jdrussell3/slspec.txt')
# dwelltime = "0.000568"
# totalreadouttime = "0.14484"
error_file = bidspreproc_dir / 'errors.txt'
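# PCI_BUS_ID ordering makes CUDA device numbering match nvidia-smi, so eddy_cuda9.1 below sees
# the GPUs in a predictable order.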
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
class dwipreproc():
def __init__(self, ses_dir):
self.ses_dir = ses_dir
self.subj_dir = ses_dir.parent
self.subjroot = "_".join([self.subj_dir.name, self.ses_dir.name])
self.main(self.ses_dir)
def preproc_prep(self, ses_dir):
####################################################################################
# ----Creating Directory Structures, Copying Files, and Initializing Variables---- #
####################################################################################
# 1. Setting variables
dwi_dir = ses_dir / 'dwi'
anat_dir = ses_dir / 'anat'
sourcedwi = dwi_dir/(self.subjroot + '_acq-AxDTIASSET_dwi.nii')
sourcebvec = dwi_dir/(self.subjroot + '_acq-AxDTIASSET_dwi.bvec')
sourcebval = dwi_dir/(self.subjroot + '_acq-AxDTIASSET_dwi.bval')
sourceanat = anat_dir/(self.subjroot + '_acq-AXFSPGRBRAVONEW_T1w.nii')
if not sourcedwi.exists():
return  # nothing to preprocess for this session if the source DWI is missing
# 2. Create directory structure
preproc_dir = bidspreproc_dir / self.subj_dir.name / ses_dir.name
self.preprocdwi_dir = preproc_dir / 'dwi'
# if self.preprocdwi_dir.exists():
# shutil.rmtree(self.preprocdwi_dir)
self.preprocdwi_dir.mkdir(parents=True, exist_ok=True)
self.preprocanat_dir = preproc_dir / 'anat'
# if self.preprocanat_dir.exists():
# shutil.rmtree(self.preprocanat_dir)
self.preprocanat_dir.mkdir(parents=True, exist_ok=True)
# 3. Make directories to hold 'original' unprocessed files
origdwi_dir = self.preprocdwi_dir / 'original'
origdwi_dir.mkdir(parents=True, exist_ok=True)
origanat_dir = self.preprocanat_dir / 'original'
origanat_dir.mkdir(parents=True, exist_ok=True)
# 4. Copy source files to 'original' directory
self.inputdwi = origdwi_dir / (self.subjroot + '_dwi.nii')
self.inputbvec = origdwi_dir / (self.subjroot + '_dwi.bvec')
self.inputbval = origdwi_dir / (self.subjroot + '_dwi.bval')
self.inputanat = origanat_dir / (self.subjroot + '_T1w.nii')
try:
shutil.copyfile(sourcedwi, self.inputdwi)
shutil.copyfile(sourcebvec, self.inputbvec)
shutil.copyfile(sourcebval, self.inputbval)
shutil.copyfile(sourceanat, self.inputanat)
except FileNotFoundError as e:
with open(error_file, 'w+') as errorfile:
errorfile.write(self.subjroot + ': Preprocessing failed due to missing file - ' + str(e))
return  # abort this session after logging the missing file
# 5. Create subject-specific log file for the preprocessing pipeline in the 'preprocessed' directory
logfile = preproc_dir / (self.subjroot + "_ppd.txt")
with open(logfile, 'a') as log:
###########################################################
# ----Preparing Log File and Creating Pre-Eddy Folder---- #
###########################################################
# 1. Print the log file header
startstr1 = "\n\t BRAVE RESEARCH CENTER\n\t DTI PREPROCESSING PIPELINE\n"
startstr2 = "\tSUBJECT: " + self.subj_dir.name[-3:] + " " + \
"SESSION: " + ses_dir.name[-2:] + "\n"
log.write(44*"%")
log.write(startstr1)
log.write(" " + "_"*43 + " \n\n")
log.write(startstr2)
log.write(44*"%" + "\n\n")
# 2. Convert to MIF format
log.write("#----Converting to .MIF format----#\n\n")
log.flush()
def denoise(self):
self.dwi_denoised = self.preprocdwi_dir / (self.subjroot + '_denoised.nii')
subprocess.run(['dwidenoise', '-force', self.inputdwi, self.dwi_denoised])
def degibbs(self):
self.dwi_degibbs = self.preprocdwi_dir / (self.subjroot + '_degibbs.nii')
subprocess.run(['mrdegibbs', '-force', self.dwi_denoised, self.dwi_degibbs])
def regrid(self):
self.dwi_regrid = self.preprocdwi_dir / (self.subjroot + '_regrid.nii')
subprocess.run(['mrgrid', '-info', '-force', self.dwi_degibbs, 'regrid', self.dwi_regrid,
'-voxel', '1'])
def synb0(self):
synb0_dir = self.preprocdwi_dir / 'synb0'
if synb0_dir.exists():
shutil.rmtree(synb0_dir)
synb0_dir.mkdir(exist_ok=True)
self.synb0_INPUT_dir = synb0_dir / 'INPUTS'
if self.synb0_INPUT_dir.exists():
shutil.rmtree(self.synb0_INPUT_dir)
self.synb0_INPUT_dir.mkdir(exist_ok=True)
self.synb0_OUTPUT_dir = synb0_dir / 'OUTPUTS'
if self.synb0_OUTPUT_dir.exists():
shutil.rmtree(self.synb0_OUTPUT_dir)
self.synb0_OUTPUT_dir.mkdir(exist_ok=True)
all_b0 = self.synb0_INPUT_dir / 'all_b0.nii'
subprocess.run(['dwiextract', '-force', '-fslgrad', self.inputbvec, self.inputbval, self.dwi_regrid, all_b0])
syn_b0 = self.synb0_INPUT_dir / 'b0.nii.gz'
subprocess.run(['mrmath', '-force', all_b0, 'mean', syn_b0, '-axis', '3'])
synb0_T1 = self.synb0_INPUT_dir / 'T1.nii.gz'
shutil.copy(self.inputanat, synb0_T1)
self.synb0_topup_acqc = self.synb0_INPUT_dir / 'acqparams.txt'
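# acqparams.txt follows the FSL topup/eddy convention "PE_x PE_y PE_z total_readout_time": the first
# row describes the acquired b0 (readout 0.14484 s, matching the commented totalreadouttime above);
# the second row's readout of 0 is the synb0-DISCO convention for the synthetic, distortion-free b0.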
with open(self.synb0_topup_acqc, 'w') as acqfile:
acqfile.write("0 1 0 0.14484" + '\n' + "0 1 0 0")
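# The hansencb/synb0 container (Synb0-DISCO) takes the mean b0 and T1 from INPUTS/ and writes a
# synthetic distortion-free b0 plus topup results to OUTPUTS/, which eddy consumes via --topup below.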
subprocess.run(['docker', 'run', '--rm', '-v', str(self.synb0_INPUT_dir)+str(':/INPUTS/'), '-v',
str(self.synb0_OUTPUT_dir)+str(':/OUTPUTS/'),
'-v', '/fast_scratch/jdr/dwiproc_test/ses-01/license.txt:/extra/freesurfer/license.txt',
'--user', '57059:20', 'hansencb/synb0'])
def eddy(self):
# REMOVE AFTER TESTING #
synb0_dir = self.preprocdwi_dir / 'synb0'
self.synb0_INPUT_dir = synb0_dir / 'INPUTS'
self.synb0_OUTPUT_dir = synb0_dir / 'OUTPUTS'
self.synb0_topup_acqc = self.synb0_INPUT_dir / 'acqparams.txt'
###########################
eddy_dir = self.preprocdwi_dir / 'eddy'
eddy_dir.mkdir(exist_ok=True)
# Create dwi mask
dwi_mask = eddy_dir / (self.subjroot + '_mask.nii')
subprocess.run(['dwi2mask', '-force', '-fslgrad', self.inputbvec, self.inputbval, self.inputdwi, dwi_mask])
# Generating volume index file
eddy_index = eddy_dir / 'eddy_index.txt'
with open(eddy_index, 'w') as indexfile:
getnvols = subprocess.Popen(
['fslval', self.inputdwi, 'dim4'], stdout=subprocess.PIPE)
nvols = getnvols.stdout.read()
for i in range(int(nvols)):
indexfile.write("1 ")
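# One "1" per diffusion volume: eddy's --index file maps every volume to the first row of
# acqparams.txt, since all volumes share the same phase-encoding.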
# Run eddy
eddy_basename = str(eddy_dir / (self.subjroot + '_dwi_eddy'))
subprocess.run(['eddy_cuda9.1', '--imain='+str(self.inputdwi), '--mask='+str(dwi_mask),
'--acqp='+str(self.synb0_topup_acqc), '--index='+str(eddy_index),
'--bvecs='+str(self.inputbvec), '--bvals='+str(self.inputbval),
'--topup='+str(self.synb0_OUTPUT_dir)+('/topup'),
'--out='+eddy_basename, '--repol', '--residuals', '--slm=linear', '--very_verbose'])
self.dwi_eddycorr = eddy_dir / (self.subjroot + '_dwi_eddy.nii.gz')
def biascorrection(self):
self.biascorr = self.preprocdwi_dir / (self.subjroot + '_biascorr.nii')
subprocess.run(['dwibiascorrect', '-info', '-force', 'ants', self.dwi_eddycorr,
self.biascorr, '-scratch', '/tmp'])
def main(self, ses_dir):
self.preproc_prep(ses_dir)
self.denoise()
self.degibbs()
self.regrid()
self.synb0()
self.eddy()
ses_dirs = lambda: (ses_dir for ses_dir in bidsmaster_dir.glob('*/ses-01') # noqa: E731
if ses_dir.parent.name == 'sub-001')
def container(ses_dir):
c = dwipreproc(ses_dir) # noqa: F841
with parallel_backend("loky", inner_max_num_threads=1):
results = Parallel(n_jobs=1, verbose=1)(
delayed(container)(ses_dir) for ses_dir in sorted(ses_dirs()))
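# With n_jobs=1 the Parallel call above processes sessions one at a time; raising n_jobs fans the
# per-session pipelines out across worker processes.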
| [
"[email protected]"
] | |
4beef22bf29700d3794b948c98dbaa4b55e1f8e0 | 7db6c1865cf9102808824ff06cda747b6e572a21 | /Python/Test/Time/time_count.py | 9d6d2bf361c05494395587a1c48789d455799998 | [] | no_license | hyteer/testing | 1f6cabc1d2b67faa4533e6ad7eb5be8c13d542c9 | 1d8b47b3bbb2daf00e4f15b5d18e86111ea4e113 | refs/heads/master | 2020-05-21T16:19:08.243676 | 2017-01-03T01:25:17 | 2017-01-03T01:25:17 | 60,914,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | import time
from datetime import datetime
def compute(i):
for i in range(1,i):
i = i+1
return i
start_time = datetime.now()
print "start:%s" % str(start_time)
n = compute(100000)
end_time = datetime.now()
print "end:%s" % str(end_time)
#elapsed_time = end_time - start_time
#print "elapsed_time:%s" % str(elapsed_time)
#print "start:%r, End:%r" % (start_time, end_time)
#print datetime.now() | [
"[email protected]"
] | |
88f98b361bb900da84472e106fdc314378c2e695 | facbdbdadacd23f6c83d266116dc14744741070f | /Core_Python/Day-22/Dict/13.py | b12af90d65221bfd295325d609af46cfb2c20761 | [] | no_license | Yogesh-Singh-Gadwal/YSG_Python | 51b6b53fe34567bf066b6e487c00da766b47ac6b | f0d6841e1f92d1d2b27d8ecdd332d40b49a5ca69 | refs/heads/master | 2023-06-06T04:40:12.004713 | 2021-07-06T19:59:26 | 2021-07-06T19:59:26 | 292,482,586 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | # dict
import time
d1 = {
"a":"micky",
"b":"akira",
"c":"rahul"
}
print(d1)
print(type(d1))
print()
time.sleep(3)
d1['d'] = 'amit'
print(d1)
| [
"[email protected]"
] | |
a72087a44335f22fff3c3d621b637d544e2e3392 | 07cabeb47bd7c9a4e06e824ece28631c7d7441a1 | /virtual/bin/isort | 2e2a74cd71c3ca27a9b212875725fc510dc66f43 | [
"MIT"
] | permissive | Jeffmusa/PITCH-POOL | bd2b27ea5bc5b47499c0b822c46ff518eae5f2f4 | 96654a3ba7fc3f4ba00d7fb617644cc9cd5ba041 | refs/heads/master | 2020-03-28T04:17:07.471479 | 2018-09-13T13:21:17 | 2018-09-13T13:21:17 | 147,705,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/home/vicklyne/Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
fc99177cb2c2d349aa7b8d333935662131b2b0d2 | ba86ef56fb2ff1a8bf9be3058b58b9e48e7b50ce | /apps/rrhh/urls/activoUrls.py | f9ebbd3822e6e27218afabebd53c16babc23aa94 | [] | no_license | robertowest/lubre_homepage | 277f8fc81512b482fbea539234f30ef3eb801480 | 9de02443ba2ee3cd48afd2b7d580a09081fe84f2 | refs/heads/master | 2023-07-14T04:39:38.640155 | 2021-08-30T17:43:56 | 2021-08-30T17:43:56 | 223,473,409 | 0 | 0 | null | 2020-05-07T13:50:46 | 2019-11-22T19:34:22 | Python | UTF-8 | Python | false | false | 563 | py | from django.urls import path
from apps.rrhh.views import activoViews as views
app_name = "activo"
urlpatterns = [
path('', views.ActivoTemplateView.as_view(), name='index'),
path('listado/', views.ActivosListView.as_view(), name='list'),
path('<int:fk>/crear/', views.ActivoCreateView.as_view(), name='create'),
path('<int:pk>/', views.ActivoDetailView.as_view(), name='detail'),
path('<int:pk>/modificar/', views.ActivoUpdateView.as_view(), name='update'),
path('<int:pk>/eliminar/', views.ActivoDeleteView.as_view(), name='delete'),
] | [
"[email protected]"
] | |
d1ca7482114376f98e1dcdf854e9233fdc546a85 | 71f7d58c9a33fc8fdfdd85d5f432565010856c5a | /ciscripts/check/project/__init__.py | 69052c33b396fab013dd28c9d6aedeb5ed0e50c7 | [
"MIT"
] | permissive | polysquare/polysquare-ci-scripts | 32a3bbcab62d77c1dfcbbf0ad78a23306e67d8c6 | 9978f0600ea964a9f2dffd9f4eb01a10d08d6788 | refs/heads/master | 2022-10-27T23:37:25.192253 | 2018-02-22T02:03:11 | 2018-02-22T02:03:20 | 28,320,857 | 2 | 2 | MIT | 2022-10-23T07:10:40 | 2014-12-22T01:58:27 | Python | UTF-8 | Python | false | false | 193 | py | # /ciscripts/check/project/__init__.py
#
# Module loader file for /ciscripts/check/project.
#
# See /LICENCE.md for Copyright information
"""Module loader file for /ciscripts/check/project."""
| [
"[email protected]"
] | |
a40241e3028bcbd61a3f99c32bbbcd513a9624c9 | e0f133b49f9f0f416f14da70a2cadb7011c0cb7b | /new_spider/extractor/souhu_learning/souhu_extractor.py | dfe47174d0ba973418907950c1f51c2e8a8f0f9a | [] | no_license | cash2one/python_frame | ac52d052fd3698303f1f4fa022f3b35a56e07533 | 2dbda155780a19cf42d5376104879d0667fbbf75 | refs/heads/master | 2021-06-18T13:28:40.356527 | 2017-06-28T02:51:35 | 2017-06-28T02:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,564 | py | # -*- coding: utf8 -*-
import traceback
import datetime
import re
import json
from lxml.html import fromstring
import sys
import random
reload(sys)
sys.setdefaultencoding('utf8')
from store.sx_basestore import BaseStore
from bs4 import BeautifulSoup
'''
Parsing is done with BeautifulSoup (bs4).
'''
class SourceExtractor (object):
'''
Return codes used by the extractor methods:
-1 parsing exception
1 no data
'''
def extractor_list_lxml(self, body):
try:
tree = fromstring(body) # 这种方式 可使用cssselect 不然linux 不能使用
list_box = tree.cssselect('div.main-content')
# .main - content
if list_box:
result_list = list()
list_content = tree.cssselect("li")
if list_content:
for list_one in list_content:
content_title = list_one.cssselect('span.content-title > a')
if content_title:
print content_title[0].get("href")
else:
return 1
except:
print traceback.format_exc()
return -1
# Returns -1 on parse error, 1 if no content; parses the catalog (table of contents) page
def extractor_list_bs_catalog(self, body):
try:
bsObj = BeautifulSoup(body, 'html.parser')
dl_list = bsObj.find_all("dl")
result_list = list()
if dl_list:
for dl_one in dl_list:
a_list = dl_one.find_all("a")
if a_list:
for i, a_one in enumerate(a_list):
item = dict()
if i == 0:
url = a_one.attrs["href"]
catalog_one = a_one.get_text()
catalog_two = ""
else:
url = a_one.attrs["href"]
catalog_two = a_one.get_text()
item["url"] = url
item["catalog_one"] = catalog_one
try:
item["catalog_two"] = catalog_two
except:
item["catalog_two"] = ""
result_list.append(item)
return result_list
return 1
except:
print traceback.format_exc()
return -1
# Returns -1 on parse error, 1 if no content; parses a first-level category list page
def extractor_list_bs(self, body, extractor_page=0):
try:
if body.find("</html>") > -1:
pass
else:
return 1
ext_result = dict()
if extractor_page == 1:
maxPage, showPages = self.extractor_body(body)
ext_result["maxPage"] = maxPage
ext_result["showPages"] = showPages
bsObj = BeautifulSoup(body, 'html.parser')
list_box = bsObj.find_all("div", {"class": "main-content"})
result_list = list()
if list_box:
list_content = list_box[0].find_all("li")
if list_content:
for list_one in list_content:
spans = list_one.find_all("span", {"class": "content-title"})
if spans:
a_one = spans[0].find("a").attrs["href"]
result_list.append(a_one)
ext_result["result_list"] = result_list
return ext_result
except:
print traceback.format_exc()
return -1
# Returns -1 on parse error, 1 if no content; parses a second-level category list page
def extractor_list_categoryteo_bs(self, body, extractor_page=0):
try:
if body.find("</html>") > -1:
pass
else:
return 1
ext_result = dict()
if extractor_page == 1:
maxPage, showPages = self.extractor_body(body)
ext_result["maxPage"] = maxPage
ext_result["showPages"] = showPages
bsObj = BeautifulSoup(body, 'html.parser')
span_list_box = bsObj.find_all("span", {"class": "content-title"})
result_list = list()
if span_list_box:
for span_one in span_list_box:
a_list = span_one.find_all("a")
if a_list:
href_url = a_list[0].attrs["href"]
result_list.append(href_url)
ext_result["result_list"] = result_list
return ext_result
except:
print traceback.format_exc()
return -1
# Parses an article detail page
def extractor_detail_bs(self, body):
try:
html_item = dict()
content = ""
title = ""
crumbs = ""
img_srcs = list()
file_names = list()
content, img_srcs, file_names = self.get_content_body(body)
# print content
bsObj = BeautifulSoup(body, 'html.parser')
# bsObj = BeautifulSoup(body, 'lxml')
location = bsObj.find_all("div", {"class": "location"})
head = ""
if location:
head = location[0].get_text()
start_index = head.find(">")
crumbs = head[start_index + 1:].replace(" ", "")
h1 = bsObj.find_all("h1")
if h1:
title = h1[0].get_text()
# print crumbs, title
# print img_srcs, file_names
html_item["crumbs"] = crumbs
html_item["title"] = title
html_item["content"] = content
# print type(content)
html_item["img_srcs"] = img_srcs
html_item["file_names"] = file_names
return html_item
# return 1
except:
print traceback.format_exc()
return -1
def get_content_body(self, body):
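# Crude string-slicing extraction: take everything between the first tag after the "contentText"
# marker and the first closing </div> that follows it.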
start_index = body.find("contentText")
temp_body = body[start_index:]
start_index = temp_body.find("<")
temp_body = temp_body[start_index:]
end_index = temp_body.find("</div>")
content = temp_body[0: end_index]
return self.analyze_content(content)
def analyze_content(self, content):
# Collect image src URLs so they can be rewritten to local file names
img_srcs = re.findall(r"""src=\"(.*?)\"""", content)
file_names = list()
if img_srcs:
for content_one in img_srcs:
start_index = content_one.rfind("/")
end_index = content_one.rfind(".")
# Spread downloaded images across 100 numbered folders
filename = "images/img%s/" % str(random.randint(1, 100)) + content_one[
start_index + 1: end_index] + ".jpg"
file_names.append(filename)
content = content.replace(content_one, filename)
return content, img_srcs, file_names
def extractor_body(self, body):
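# Reads paging variables out of inline JavaScript on the list pages (e.g. "var maxPage = 12;");
# the 14/15 offsets skip past the "var maxPage = " / "var showPages =" prefixes before int().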
body_start_index = body.find("var maxPage =")
temp_body = body[body_start_index:]
temp_end_index = temp_body.find(";")
maxPage = int(temp_body[14: temp_end_index])
body_start_index = body.find("var showPages =")
temp_body = body[body_start_index:]
temp_end_index = temp_body.find(";")
showPages = int(temp_body[15: temp_end_index])
return maxPage, showPages
if __name__ == '__main__':
# sx = HandleCsvDeal()
extractor = SourceExtractor()
sxfile = open("detail.txt", "rb")
content = sxfile.read()
# print content
returntext = extractor.extractor_detail_bs(content)
# returntext = extractor.extractor_list_categoryteo_bs(content)
# returntext = extractor.extractor_list_bs_catalog(content)
returntext["url"] = "http://learning.sohu.com/20170502/n491504271.shtml"
# print returntext["content"]
# self, results, table="", type=1, field=None, db_connnection=""
con = {'host': '115.159.0.225',
'user': 'remote',
'password': 'Iknowthat',
'db': 'souhu_learning'}
sx_store = BaseStore()
store_list = list()
store_list.append(returntext)
sx_store.store_table_db(store_list, table="souhu_details", db_connnection=con)
# returntext = extractor.extractor_photojs(content, 12)
# print len(returntext)
# filename = "csv01010.csv"
# sx.sx_write_File(filename, returntext)
| [
"[email protected]"
] | |
7d17d96e13057b1f94d9771992243f7e923410b6 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v7_2_0/interface/hundredgigabitethernet/switchport/__init__.py | 5aee5f6f90f1fafbc5ecf6e1dd1f68151d3786c2 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,646 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import mode
import port_security
import access
import access_mac_group_vlan_classification
import access_mac_vlan_classification
import trunk_private_vlan_classification
import trunk
import private_vlan
import access_mac_group_rspan_vlan_classification
import access_mac_rspan_vlan_classification
class switchport(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The L2 switching characteristics of an interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mode','__port_security','__access','__access_mac_group_vlan_classification','__access_mac_vlan_classification','__trunk_private_vlan_classification','__trunk','__private_vlan','__access_mac_group_rspan_vlan_classification','__access_mac_rspan_vlan_classification',)
_yang_name = 'switchport'
_rest_name = 'switchport'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__trunk_private_vlan_classification = YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__private_vlan = YANGDynClass(base=private_vlan.private_vlan, is_container='container', presence=False, yang_name="private-vlan", rest_name="private-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Private-Vlan Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__access_mac_vlan_classification = YANGDynClass(base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__access = YANGDynClass(base=access.access, is_container='container', presence=False, yang_name="access", rest_name="access", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__access_mac_group_vlan_classification = YANGDynClass(base=access_mac_group_vlan_classification.access_mac_group_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'mac-group-vlan-classification-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__port_security = YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__access_mac_group_rspan_vlan_classification = YANGDynClass(base=access_mac_group_rspan_vlan_classification.access_mac_group_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__mode = YANGDynClass(base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set mode of the Layer2 interface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__trunk = YANGDynClass(base=trunk.trunk, is_container='container', presence=False, yang_name="trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__access_mac_rspan_vlan_classification = YANGDynClass(base=access_mac_rspan_vlan_classification.access_mac_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'switchport']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'switchport']
def _get_mode(self):
"""
Getter method for mode, mapped from YANG variable /interface/hundredgigabitethernet/switchport/mode (container)
YANG Description: The mode of the Layer2 interface.
"""
return self.__mode
def _set_mode(self, v, load=False):
"""
Setter method for mode, mapped from YANG variable /interface/hundredgigabitethernet/switchport/mode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mode() directly.
YANG Description: The mode of the Layer2 interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set mode of the Layer2 interface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mode must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set mode of the Layer2 interface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__mode = t
if hasattr(self, '_set'):
self._set()
def _unset_mode(self):
self.__mode = YANGDynClass(base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set mode of the Layer2 interface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_port_security(self):
"""
Getter method for port_security, mapped from YANG variable /interface/hundredgigabitethernet/switchport/port_security (container)
YANG Description: Enable port-security feature
"""
return self.__port_security
def _set_port_security(self, v, load=False):
"""
Setter method for port_security, mapped from YANG variable /interface/hundredgigabitethernet/switchport/port_security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_security() directly.
YANG Description: Enable port-security feature
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_security must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__port_security = t
if hasattr(self, '_set'):
self._set()
def _unset_port_security(self):
self.__port_security = YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access(self):
"""
Getter method for access, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access (container)
YANG Description: The access layer characteristics of this
interface.
"""
return self.__access
def _set_access(self, v, load=False):
"""
Setter method for access, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access() directly.
YANG Description: The access layer characteristics of this
interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access.access, is_container='container', presence=False, yang_name="access", rest_name="access", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access.access, is_container='container', presence=False, yang_name="access", rest_name="access", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access = t
if hasattr(self, '_set'):
self._set()
def _unset_access(self):
self.__access = YANGDynClass(base=access.access, is_container='container', presence=False, yang_name="access", rest_name="access", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access_mac_group_vlan_classification(self):
"""
Getter method for access_mac_group_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_vlan_classification (container)
"""
return self.__access_mac_group_vlan_classification
def _set_access_mac_group_vlan_classification(self, v, load=False):
"""
Setter method for access_mac_group_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_group_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_group_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_mac_group_vlan_classification.access_mac_group_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'mac-group-vlan-classification-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_mac_group_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_mac_group_vlan_classification.access_mac_group_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'mac-group-vlan-classification-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access_mac_group_vlan_classification = t
if hasattr(self, '_set'):
self._set()
def _unset_access_mac_group_vlan_classification(self):
self.__access_mac_group_vlan_classification = YANGDynClass(base=access_mac_group_vlan_classification.access_mac_group_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'mac-group-vlan-classification-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access_mac_vlan_classification(self):
"""
Getter method for access_mac_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_vlan_classification (container)
"""
return self.__access_mac_vlan_classification
def _set_access_mac_vlan_classification(self, v, load=False):
"""
Setter method for access_mac_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_mac_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access_mac_vlan_classification = t
if hasattr(self, '_set'):
self._set()
def _unset_access_mac_vlan_classification(self):
self.__access_mac_vlan_classification = YANGDynClass(base=access_mac_vlan_classification.access_mac_vlan_classification, is_container='container', presence=False, yang_name="access-mac-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'gvlan-access-port-config-phy'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_trunk_private_vlan_classification(self):
"""
Getter method for trunk_private_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk_private_vlan_classification (container)
"""
return self.__trunk_private_vlan_classification
def _set_trunk_private_vlan_classification(self, v, load=False):
"""
Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk_private_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_private_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_private_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_private_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__trunk_private_vlan_classification = t
if hasattr(self, '_set'):
self._set()
def _unset_trunk_private_vlan_classification(self):
self.__trunk_private_vlan_classification = YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_trunk(self):
"""
Getter method for trunk, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk (container)
YANG Description: The trunking characteristics of this interface.
"""
return self.__trunk
def _set_trunk(self, v, load=False):
"""
Setter method for trunk, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk() directly.
YANG Description: The trunking characteristics of this interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=trunk.trunk, is_container='container', presence=False, yang_name="trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=trunk.trunk, is_container='container', presence=False, yang_name="trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__trunk = t
if hasattr(self, '_set'):
self._set()
def _unset_trunk(self):
self.__trunk = YANGDynClass(base=trunk.trunk, is_container='container', presence=False, yang_name="trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_private_vlan(self):
"""
Getter method for private_vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/private_vlan (container)
YANG Description: Set Private-Vlan Configuration
"""
return self.__private_vlan
def _set_private_vlan(self, v, load=False):
"""
Setter method for private_vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/private_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_private_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_private_vlan() directly.
YANG Description: Set Private-Vlan Configuration
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=private_vlan.private_vlan, is_container='container', presence=False, yang_name="private-vlan", rest_name="private-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Private-Vlan Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """private_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=private_vlan.private_vlan, is_container='container', presence=False, yang_name="private-vlan", rest_name="private-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Private-Vlan Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__private_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_private_vlan(self):
self.__private_vlan = YANGDynClass(base=private_vlan.private_vlan, is_container='container', presence=False, yang_name="private-vlan", rest_name="private-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Private-Vlan Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access_mac_group_rspan_vlan_classification(self):
"""
Getter method for access_mac_group_rspan_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification (container)
"""
return self.__access_mac_group_rspan_vlan_classification
def _set_access_mac_group_rspan_vlan_classification(self, v, load=False):
"""
Setter method for access_mac_group_rspan_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_group_rspan_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_group_rspan_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_mac_group_rspan_vlan_classification.access_mac_group_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_mac_group_rspan_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_mac_group_rspan_vlan_classification.access_mac_group_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access_mac_group_rspan_vlan_classification = t
if hasattr(self, '_set'):
self._set()
def _unset_access_mac_group_rspan_vlan_classification(self):
self.__access_mac_group_rspan_vlan_classification = YANGDynClass(base=access_mac_group_rspan_vlan_classification.access_mac_group_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-group-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access_mac_rspan_vlan_classification(self):
"""
Getter method for access_mac_rspan_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_rspan_vlan_classification (container)
"""
return self.__access_mac_rspan_vlan_classification
def _set_access_mac_rspan_vlan_classification(self, v, load=False):
"""
Setter method for access_mac_rspan_vlan_classification, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_rspan_vlan_classification (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_mac_rspan_vlan_classification is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_mac_rspan_vlan_classification() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_mac_rspan_vlan_classification.access_mac_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_mac_rspan_vlan_classification must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_mac_rspan_vlan_classification.access_mac_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__access_mac_rspan_vlan_classification = t
if hasattr(self, '_set'):
self._set()
def _unset_access_mac_rspan_vlan_classification(self):
self.__access_mac_rspan_vlan_classification = YANGDynClass(base=access_mac_rspan_vlan_classification.access_mac_rspan_vlan_classification, is_container='container', presence=False, yang_name="access-mac-rspan-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
mode = __builtin__.property(_get_mode, _set_mode)
port_security = __builtin__.property(_get_port_security, _set_port_security)
access = __builtin__.property(_get_access, _set_access)
access_mac_group_vlan_classification = __builtin__.property(_get_access_mac_group_vlan_classification, _set_access_mac_group_vlan_classification)
access_mac_vlan_classification = __builtin__.property(_get_access_mac_vlan_classification, _set_access_mac_vlan_classification)
trunk_private_vlan_classification = __builtin__.property(_get_trunk_private_vlan_classification, _set_trunk_private_vlan_classification)
trunk = __builtin__.property(_get_trunk, _set_trunk)
private_vlan = __builtin__.property(_get_private_vlan, _set_private_vlan)
access_mac_group_rspan_vlan_classification = __builtin__.property(_get_access_mac_group_rspan_vlan_classification, _set_access_mac_group_rspan_vlan_classification)
access_mac_rspan_vlan_classification = __builtin__.property(_get_access_mac_rspan_vlan_classification, _set_access_mac_rspan_vlan_classification)
_pyangbind_elements = {'mode': mode, 'port_security': port_security, 'access': access, 'access_mac_group_vlan_classification': access_mac_group_vlan_classification, 'access_mac_vlan_classification': access_mac_vlan_classification, 'trunk_private_vlan_classification': trunk_private_vlan_classification, 'trunk': trunk, 'private_vlan': private_vlan, 'access_mac_group_rspan_vlan_classification': access_mac_group_rspan_vlan_classification, 'access_mac_rspan_vlan_classification': access_mac_rspan_vlan_classification, }
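# Hedged usage sketch (added; not part of the generated binding). In the full pyangbind tree this
# container hangs off interface/hundredgigabitethernet, but it can also be built standalone; the
# get(filter=True) accessor comes from pyangbind's PybindBase and returns only changed elements:
#
#   sp = switchport()
#   sp.mode                 # child containers are built from the sibling generated modules
#   sp.get(filter=True)     # {} until something under the container is configured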
| [
"[email protected]"
] | |
49cf7d40a41f08cf4b9942fb0992993977cdd6cb | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Gluon_ResNet50_v1b_for_PyTorch/timm/models/layers/halo_attn.py | 5cb9d54dd40bdc666fb9eb7c60ee2eaa1c43e199 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"CC-BY-NC-4.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,400 | py | # Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Halo Self Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
@misc{2103.12731,
Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
Jonathon Shlens},
Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
Year = {2021},
}
Status:
This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me.
Trying to match the 'H1' variant in the paper, my parameter counts are 2M less and the model
is extremely slow. Something isn't right. However, the models do appear to train and experimental
variants with attn in C4 and/or C5 stages are tolerable speed.
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Tuple, List
import torch
from torch import nn
import torch.nn.functional as F
from .weight_init import trunc_normal_
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
""" Compute relative logits along one dimension
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
Args:
q: (batch, height, width, dim)
rel_k: (2 * window - 1, dim)
permute_mask: permute output dim according to this
"""
B, H, W, dim = q.shape
rel_size = rel_k.shape[0]
win_size = (rel_size + 1) // 2
x = (q @ rel_k.transpose(-1, -2))
x = x.reshape(-1, W, rel_size)
# pad to shift from relative to absolute indexing
x_pad = F.pad(x, [0, 1]).flatten(1)
x_pad = F.pad(x_pad, [0, rel_size - W])
# reshape and slice out the padded elements
x_pad = x_pad.reshape(-1, W + 1, rel_size)
x = x_pad[:, :W, win_size - 1:]
# reshape and tile
x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
return x.permute(permute_mask)
class PosEmbedRel(nn.Module):
""" Relative Position Embedding
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
"""
def __init__(self, block_size, win_size, dim_head, scale):
"""
Args:
block_size (int): block size
win_size (int): neighbourhood window size
dim_head (int): attention head dim
scale (float): scale factor (for init)
"""
super().__init__()
self.block_size = block_size
self.dim_head = dim_head
self.scale = scale
self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale)
self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale)
def forward(self, q):
B, BB, HW, _ = q.shape
# relative logits in width dimension.
q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
# relative logits in height dimension.
q = q.transpose(1, 2)
rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
rel_logits = rel_logits_h + rel_logits_w
rel_logits = rel_logits.reshape(B, BB, HW, -1)
return rel_logits
class HaloAttn(nn.Module):
""" Halo Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
"""
def __init__(
self, dim, dim_out=None, stride=1, num_heads=8, dim_head=16, block_size=8, halo_size=3, qkv_bias=False):
super().__init__()
dim_out = dim_out or dim
assert dim_out % num_heads == 0
self.stride = stride
self.num_heads = num_heads
self.dim_head = dim_head
self.dim_qk = num_heads * dim_head
self.dim_v = dim_out
self.block_size = block_size
self.halo_size = halo_size
self.win_size = block_size + halo_size * 2 # neighbourhood window size
self.scale = self.dim_head ** -0.5
# FIXME not clear if this stride behaviour is what the paper intended
# Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
# data in unfolded block form. I haven't wrapped my head around how that'd look.
self.q = nn.Conv2d(dim, self.dim_qk, 1, stride=self.stride, bias=qkv_bias)
self.kv = nn.Conv2d(dim, self.dim_qk + self.dim_v, 1, bias=qkv_bias)
self.pos_embed = PosEmbedRel(
block_size=block_size // self.stride, win_size=self.win_size, dim_head=self.dim_head, scale=self.scale)
def reset_parameters(self):
std = self.q.weight.shape[1] ** -0.5 # fan-in
trunc_normal_(self.q.weight, std=std)
trunc_normal_(self.kv.weight, std=std)
trunc_normal_(self.pos_embed.height_rel, std=self.scale)
trunc_normal_(self.pos_embed.width_rel, std=self.scale)
def forward(self, x):
B, C, H, W = x.shape
assert H % self.block_size == 0 and W % self.block_size == 0
num_h_blocks = H // self.block_size
num_w_blocks = W // self.block_size
num_blocks = num_h_blocks * num_w_blocks
q = self.q(x)
q = F.unfold(q, kernel_size=self.block_size // self.stride, stride=self.block_size // self.stride)
# B, num_heads * dim_head * block_size ** 2, num_blocks
q = q.reshape(B * self.num_heads, self.dim_head, -1, num_blocks).transpose(1, 3)
# B * num_heads, num_blocks, block_size ** 2, dim_head
kv = self.kv(x)
# FIXME I 'think' this unfold does what I want it to, but I should investigate
kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
kv = kv.reshape(
B * self.num_heads, self.dim_head + (self.dim_v // self.num_heads), -1, num_blocks).transpose(1, 3)
k, v = torch.split(kv, [self.dim_head, self.dim_v // self.num_heads], dim=-1)
attn_logits = (q @ k.transpose(-1, -2)) * self.scale # FIXME should usual attn scale be applied?
attn_logits = attn_logits + self.pos_embed(q) # B * num_heads, block_size ** 2, win_size ** 2
attn_out = attn_logits.softmax(dim=-1)
attn_out = (attn_out @ v).transpose(1, 3) # B * num_heads, dim_v // num_heads, block_size ** 2, num_blocks
attn_out = F.fold(
attn_out.reshape(B, -1, num_blocks),
(H // self.stride, W // self.stride),
kernel_size=self.block_size // self.stride, stride=self.block_size // self.stride)
# B, dim_out, H // stride, W // stride
return attn_out
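if __name__ == "__main__":
    # Quick shape check -- an illustrative sketch only; the sizes below are
    # assumptions and not part of timm. H and W must be divisible by block_size,
    # and with stride=1 the spatial size is preserved while channels become dim_out.
    _attn = HaloAttn(dim=256, num_heads=8, dim_head=16, block_size=8, halo_size=3)
    _out = _attn(torch.randn(2, 256, 32, 32))
    print(_out.shape)  # expected: torch.Size([2, 256, 32, 32])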
| [
"[email protected]"
] | |
9090de0981c2a4334712d26275bc1f06aeb6a383 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_lees.py | 0c52ef364ea8500533dfc82c6c80a47232551d67 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._lee import _LEE
#calss header
class _LEES(_LEE, ):
def __init__(self,):
_LEE.__init__(self)
self.name = "LEES"
self.specie = 'nouns'
self.basic = "lee"
self.jsondata = {}
| [
"[email protected]"
] | |
e86906546d5709bb143c540a3d02b9fb77e10673 | 27b86f422246a78704e0e84983b2630533a47db6 | /tests/test_05_tools/test_534_dwg_info.py | 3b9e92c3d0eced9c637c8286aceef1c82a2dbdfa | [
"MIT"
] | permissive | mozman/ezdxf | 7512decd600896960660f0f580cab815bf0d7a51 | ba6ab0264dcb6833173042a37b1b5ae878d75113 | refs/heads/master | 2023-09-01T11:55:13.462105 | 2023-08-15T11:50:05 | 2023-08-15T12:00:04 | 79,697,117 | 750 | 194 | MIT | 2023-09-14T09:40:41 | 2017-01-22T05:55:55 | Python | UTF-8 | Python | false | false | 988 | py | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.dwginfo import dwg_info
R12 = "41 43 31 30 30 39"
R2000 = "41 43 31 30 31 35"
R2018 = "41 43 31 30 33 32"
R20XX = "41 43 31 30 33 33"
unknown1 = "32 32 31 30 33 32"
unknown2 = ""
def data(s) -> bytes:
return bytes(int(x, 16) for x in s.split())
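# For example, data(R12) == b"AC1009": the first six header bytes of a DWG file
# spell out the AutoCAD version string (illustrative note, not part of the test file).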
@pytest.mark.parametrize(
"s,ver,rel",
[
(R12, "AC1009", "R12"),
(R2000, "AC1015", "R2000"),
(R2018, "AC1032", "R2018"),
(R20XX, "AC1033", "unknown"),
],
ids=["R12", "R2000", "R2018", "unknown"],
)
def test_detect(s, ver, rel):
info = dwg_info(data(s))
assert info.version == ver
assert info.release == rel
@pytest.mark.parametrize(
"s", [unknown1, unknown2],
ids=["invalid", "empty"],
)
def test_detect_invalid(s):
info = dwg_info(data(s))
assert info.version == "invalid"
assert info.release == "invalid"
if __name__ == "__main__":
pytest.main([__file__])
| [
"[email protected]"
] | |
f60fa889f48e5d98c9ed095639ff9bbcdbced23b | 364085d006bb0e31f915091a07125501ab455277 | /amplify/agent/util/http.py | 2a6ec08a67743e03fcc23d237fde672d3aa78fd6 | [
"BSD-2-Clause"
] | permissive | digideskio/digidesk-amplified | 6f64768c28b7ecc32088259f07498df6956341ae | 547f899d6fd47dc726df28ee90bf3511f02bd6cf | refs/heads/master | 2020-12-30T23:21:08.300692 | 2016-04-14T12:20:01 | 2016-04-14T12:20:01 | 56,352,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | # -*- coding: utf-8 -*-
import ujson
import time
import requests
import logging
import zlib
from amplify.agent import Singleton
from amplify.agent.context import context
requests.packages.urllib3.disable_warnings()
"""
WHY DO YOU DISABLE THIS WARNING?
We don't want to show you redundant messages.
IS IT A REAL PROBLEM?
No. It is not a real problem.
It's just a notification that urllib3 uses standard Python SSL library.
GIVE ME MORE DETAILS!
By default, urllib3 uses the standard library’s ssl module.
Unfortunately, there are several limitations which are addressed by PyOpenSSL.
In order to work with Python OpenSSL bindings urllib3 needs
requests[security] to be installed, which contains cryptography,
pyopenssl and other modules.
The problem is we CAN'T ship Amplify with built-in OpenSSL & cryptography.
You can install those libs manually and enable warnings back.
More details: https://urllib3.readthedocs.org/en/latest/security.html#pyopenssl
"""
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "[email protected]"
class HTTPClient(Singleton):
def __init__(self):
config = context.app_config
self.timeout = float(config['cloud']['api_timeout'])
self.verify_ssl_cert = config['cloud']['verify_ssl_cert']
self.gzip = config['cloud']['gzip']
self.session = None
self.url = None
self.proxies = config.get('proxies') # Support old configs which don't have 'proxies' section
if self.proxies and self.proxies.get('https', '') == '':
self.proxies = None # Pass None to trigger requests default scraping of environment variables
self.update_cloud_url()
logging.getLogger("requests").setLevel(logging.WARNING)
def update_cloud_url(self):
config = context.app_config
content_type = 'binary/octet-stream' if self.gzip else 'application/json'
self.url = '%s/%s' % (config['cloud']['api_url'], config['credentials']['api_key'])
self.session = requests.Session()
self.session.headers.update({
'Content-Type': content_type,
'User-Agent': 'nginx-amplify-agent/%s' % context.version
})
def make_request(self, location, method, data=None, timeout=None, json=True, log=True):
url = location if location.startswith('http') else '%s/%s' % (self.url, location)
timeout = timeout if timeout is not None else self.timeout
payload = ujson.encode(data) if data else '{}'
payload = zlib.compress(payload, self.gzip) if self.gzip else payload
start_time = time.time()
result, http_code = '', 500
try:
if method == 'get':
r = self.session.get(
url,
timeout=timeout,
verify=self.verify_ssl_cert,
proxies=self.proxies
)
else:
r = self.session.post(
url,
data=payload,
timeout=timeout,
verify=self.verify_ssl_cert,
proxies=self.proxies
)
http_code = r.status_code
r.raise_for_status()
result = r.json() if json else r.text
return result
except Exception as e:
if log:
context.log.error('failed %s "%s", exception: "%s"' % (method.upper(), url, e.message))
context.log.debug('', exc_info=True)
raise e
finally:
end_time = time.time()
log_method = context.log.info if log else context.log.debug
context.log.debug(result)
log_method(
"%s %s %s %s %s %.3f" % (method, url, http_code, len(payload), len(result), end_time - start_time)
)
def post(self, url, data=None, timeout=None, json=True):
return self.make_request(url, 'post', data=data, timeout=timeout, json=json)
def get(self, url, timeout=None, json=True, log=True):
return self.make_request(url, 'get', timeout=timeout, json=json, log=log)
def resolve_uri(uri):
"""
Resolves uri if it's not absolute
:param uri: str uri
:return: str url
"""
if not(uri.startswith('http://') or uri.startswith('https://')):
return '127.0.0.1%s' % uri
else:
return uri
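# For example (illustrative values): resolve_uri('/nginx_status') returns
# '127.0.0.1/nginx_status', while 'http://example.com/status' is returned unchanged.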
| [
"[email protected]"
] | |
e0bfd11f7270a4b660b186cb8e2368ef570c68ff | aa0bf4e774ff82065927dbddf34be19c09b64c9c | /examples/ex1.py | 64ac038d0826dd17f9a516ada94d2873b01d30ab | [
"BSD-3-Clause"
] | permissive | grst/ipymd | 510ea6feb2726fadfe24ebbcbf3981c104fad8d8 | 4a57c4212b8e71848d51826859c2a3e478037e28 | refs/heads/grst | 2023-04-02T14:38:34.154687 | 2020-12-02T11:37:38 | 2020-12-02T11:37:38 | 87,005,381 | 38 | 6 | BSD-3-Clause | 2018-08-28T11:33:46 | 2017-04-02T18:08:34 | HTML | UTF-8 | Python | false | false | 511 | py | # List of ipymd cells expected for this example.
output = [
{'cell_type': 'markdown',
'source': '# Header'},
{'cell_type': 'markdown',
'source': 'A paragraph.'},
{'cell_type': 'markdown',
'source': 'Python code:'},
{'cell_type': 'code',
'input': 'print("Hello world!")',
'output': 'Hello world!'},
{'cell_type': 'markdown',
'source': 'JavaScript code:'},
{'cell_type': 'markdown',
'source': '```javascript\nconsole.log("Hello world!");\n```'}
]
| [
"[email protected]"
] | |
48218e1f9f444bb01ae4752bef0b91bea2ed4dcb | c6a101547c2b7f36fe83a725974a8a7f02cf176d | /data_structures/bst/bt_to_bst.py | a3943ce19459c0432a31bde8205bf3bcf1beb69f | [
"MIT"
] | permissive | prabhupant/python-ds | 737cc35574de5c2ece0f0813cf00775324a8dbe7 | f7d6d78fedaf84b7527965bb1798b7a8da989474 | refs/heads/master | 2023-08-22T05:04:22.937675 | 2022-10-04T01:29:39 | 2022-10-04T01:29:39 | 199,366,418 | 2,325 | 704 | MIT | 2022-10-10T13:01:10 | 2019-07-29T02:48:57 | Python | UTF-8 | Python | false | false | 767 | py | class Node:
def __init__(self, val):
        self.data = val
self.left = None
self.right = None
def store_inorder(root, inorder):
if root is None:
return
store_inorder(root.left, inorder)
inorder.append(root.data)
store_inorder(root.right, inorder)
def count_nodes(root):
if root is None:
return 0
return count_nodes(root.left) + count_nodes(root.right) + 1
def array_to_bst(arr, root):
if root is None:
return
array_to_bst(arr, root.left)
root.data = arr[0]
arr.pop(0)
array_to_bst(arr, root.right)
def bt_to_bst(root):
if root is None:
return
n = count_nodes(root)
arr = []
store_inorder(root, arr)
arr.sort()
array_to_bst(arr, root)
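if __name__ == '__main__':
    # Small driver sketch (the example tree below is an assumption, not part of
    # the original file): tree 10 -> (2, 7), 2 -> (8, 4) is rebuilt in place as a BST.
    root = Node(10)
    root.left, root.right = Node(2), Node(7)
    root.left.left, root.left.right = Node(8), Node(4)
    bt_to_bst(root)
    print(root.data, root.left.data, root.right.data)  # expected: 8 4 10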
| [
"[email protected]"
] | |
0fa67e76425e468c985c3025e54b6202be4272fd | f9e265f39cdfa568e67acb50840f9655fc4d65f7 | /builtinfunctionstypes.py | b39cd0b6875e905da2d2d93d4b3b82210ca30fcf | [] | no_license | raymondmar61/pythonwilliamfiset | 5a4fc7faba6880f3df6b3ded98cc6d17925e7895 | aae7c533f48efbe91a4e7c2d640f2032cd97e1f3 | refs/heads/master | 2021-01-25T04:42:19.546278 | 2017-09-28T22:15:30 | 2017-09-28T22:15:30 | 93,469,377 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | #williamfiset 30 Builtins 5 of 6 Types
# tuple, list, str, bool, int, float
from math import pi as PIE
print(tuple("My_Python")) #print ('M', 'y', '_', 'P', 'y', 't', 'h', 'o', 'n')
print(tuple((1,2,3))) #print (1, 2, 3)
print(tuple( ['G','N','U'] )) #print ('G', 'N', 'U'). List becomes a tuple
print(list(range(10))) #print [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(list("23456")) #print ['2', '3', '4', '5', '6']. Separates each string character into a list with elements
print(list((1,2,3,4))) #print [1, 2, 3, 4]. Tuple becomes a list.
print(str(True)) #print True
print(str("1234567")) #print 1234567
print(str(PIE)) #print 3.141592653589793
print(bool(1>3)) #print False boolean returns True or False
print(bool('a' < 'v')) #print True boolean returns True or False
print(bool(1==1)) #print True boolean returns True or False
print(int(456)) #print 456
print(int("453")) #print 453 converts string to integer
#print(int( [567] )) #error message because can't convert a list to an integer
print(float(PIE)) #print 3.141592653589793
print(float("1.474")) #print 1.474
print(float(508)) #print 508.0
#set an unordered list of unique elements, final result is a list with no duplicates
list_ = [1,1,1,2,3,4,4,4]
print(set(list_)) #print {1, 2, 3, 4}
print("\n")
my_set = set()
my_set.add(5)
my_set.add(1)
my_set.add(2)
print(my_set) #print {1, 2, 5}
my_set.update([11,1,6,8])
print(my_set) #print {1, 2, 5, 6, 8, 11}
print(list(my_set)) #print [1, 2, 5, 6, 8, 11] as a list | [
"[email protected]"
] | |
57145efae0a73a250ab079b71515694b7e3fa35e | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.2/desktop/freedesktop/hal/actions.py | 8548d7eb4c9dc4cbaff444aa99ef55843bd1fd65 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import libtools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "hal-0.5.9.1"
def setup():
autotools.configure("--enable-policy-kit \
--enable-acpi-ibm \
--enable-acpi-toshiba \
--with-dell-backlight \
--enable-umount-helper \
--enable-sonypic \
--enable-doxygen-docs \
--with-usb-csr \
--with-macbook \
--with-macbookpro \
--with-cpufreq \
--with-hal-user=hal \
--with-hal-group=hal \
--with-dbus-sys=/etc/dbus-1/system.d \
--disable-docbook-docs \
--disable-gtk-doc \
--disable-static \
--with-pid-file=/var/run/hald.pid")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
# We install this in a seperate package to avoid gnome-python dep
pisitools.remove("/usr/bin/hal-device-manager")
pisitools.removeDir("/usr/share/hal/device-manager/")
# See ya...
pisitools.removeDir("/etc/hotplug.d/")
pisitools.dodoc("AUTHORS", "COPYING", "ChangeLog", "NEWS", "README")
# Needed for hal's new cache infrastructure
pisitools.dodir("/var/lib/cache/hald/")
| [
"[email protected]"
] | |
54c170d0ddb924fd6e0c42ffd2a7d2a6f7895aea | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/detection/autoassign/pytorch/mmdet/apis/inference.py | b3e6f862f8b76afdf8b73439796ac89352b2d48b | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 8,736 | py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
"""Initialize a detector from config file.
Args:
config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,
:obj:`Path`, or the config object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
cfg_options (dict): Options to override some settings in the used
config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, (str, Path)):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
if 'pretrained' in config.model:
config.model.pretrained = None
elif 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
config.model.train_cfg = None
model = build_detector(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage:
"""Deprecated.
A simple pipeline to load image.
"""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
warnings.simplefilter('once')
warnings.warn('`LoadImage` is deprecated and will be removed in '
'future releases. You may use `LoadImageFromWebcam` '
'from `mmdet.datasets.pipelines.` instead.')
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
Either image files or loaded images.
Returns:
If imgs is a list or tuple, the same length list type results
will be returned, otherwise return the detection results directly.
"""
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# forward the model
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
if not is_batch:
return results[0]
else:
return results
async def async_inference_detector(model, imgs):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (str | ndarray): Either image files or loaded images.
Returns:
Awaitable detection results.
"""
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
results = await model.aforward_test(rescale=True, **data)
return results
def show_result_pyplot(model,
img,
result,
score_thr=0.3,
title='result',
wait_time=0,
palette=None,
out_file=None):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
title (str): Title of the pyplot figure.
wait_time (float): Value of waitKey param. Default: 0.
palette (str or tuple(int) or :obj:`Color`): Color.
The tuple of color should be in BGR order.
out_file (str or None): The path to write the image.
Default: None.
"""
if hasattr(model, 'module'):
model = model.module
model.show_result(
img,
result,
score_thr=score_thr,
show=True,
wait_time=wait_time,
win_name=title,
bbox_color=palette,
text_color=(200, 200, 200),
mask_color=palette,
out_file=out_file)
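if __name__ == '__main__':
    # End-to-end usage sketch; the config, checkpoint and image paths below are
    # placeholders (assumptions), not files guaranteed to ship with this repository.
    model = init_detector('configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
                          'autoassign_r50_fpn_8x2_1x_coco.pth', device='cuda:0')
    result = inference_detector(model, 'demo/demo.jpg')
    show_result_pyplot(model, 'demo/demo.jpg', result, score_thr=0.3)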
| [
"[email protected]"
] | |
971cbd5365ebabe295b53bc89246a7dab9884348 | 18239524612cf572bfeaa3e001a3f5d1b872690c | /clients/oathkeeper/python/ory_oathkeeper_client/models/swagger_create_rule_parameters.py | fa695454e4e5de58f8d17064a76a47b6c668c873 | [
"Apache-2.0"
] | permissive | simoneromano96/sdk | 2d7af9425dabc30df830a09b26841fb2e8781bf8 | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | refs/heads/master | 2023-05-09T13:50:45.485951 | 2021-05-28T12:18:27 | 2021-05-28T12:18:27 | 371,689,133 | 0 | 0 | Apache-2.0 | 2021-05-28T12:11:41 | 2021-05-28T12:11:40 | null | UTF-8 | Python | false | false | 3,509 | py | # coding: utf-8
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.37
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ory_oathkeeper_client.configuration import Configuration
class SwaggerCreateRuleParameters(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'body': 'SwaggerRule'
}
attribute_map = {
'body': 'Body'
}
def __init__(self, body=None, local_vars_configuration=None): # noqa: E501
"""SwaggerCreateRuleParameters - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this SwaggerCreateRuleParameters. # noqa: E501
:return: The body of this SwaggerCreateRuleParameters. # noqa: E501
:rtype: SwaggerRule
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SwaggerCreateRuleParameters.
:param body: The body of this SwaggerCreateRuleParameters. # noqa: E501
:type: SwaggerRule
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SwaggerCreateRuleParameters):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SwaggerCreateRuleParameters):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
4415b7bf0416b19028465628c5d14a17d2e84962 | 932a6797f1e97c7f8c96af83647fc27d2324765e | /python/1138. Alphabet Board Path.py | 6b59f12b589ba7b471f1b8f5ad023ddb023c67ed | [] | no_license | rhzx3519/leetcode | 1245e0a19dbcb4b853eb0ac369601f31171b55c5 | 2fe336e0de336f6d5f67b058ddb5cf50c9f00d4e | refs/heads/master | 2023-08-17T01:27:49.674440 | 2023-08-16T23:08:59 | 2023-08-16T23:08:59 | 85,682,362 | 3 | 1 | null | 2021-05-08T12:10:56 | 2017-03-21T09:23:02 | Python | UTF-8 | Python | false | false | 1,864 | py | class Solution(object):
def alphabetBoardPath(self, target):
"""
:type target: str
:rtype: str
"""
board = ["abcde", "fghij", "klmno", "pqrst", "uvwxy", "z"]
m, n = len(board), len(board[0])
vis = [[-1]*len(board[0]) for _ in range(len(board))]
dirt = 'RLUD'
d = {(-1, 0): 'U', (1, 0): 'D', (0, -1): 'L', (0, 1): 'R'}
def bfs(r, c, t):
que = [(r, c)]
while que:
x, y = que.pop(0)
# print x, y
if board[x][y]==t:
return (x, y)
for i, (dx, dy) in enumerate(((1, 0), (-1, 0), (0, 1), (0, -1))):
nx = x + dx
ny = y + dy
if nx<0 or nx>=m or ny<0 or ny>=len(board[nx]) or vis[nx][ny] != -1:
continue
vis[nx][ny] = (x, y)
que.append((nx, ny))
return (-1, -1)
def find(start, end):
prev = [end]
while end != start:
end = vis[end[0]][end[1]]
prev.append(end)
# print prev
cmd = ['!']
for i in range(1, len(prev)):
k = (prev[i-1][0] - prev[i][0], prev[i-1][1] - prev[i][1])
cmd.append(d[k])
# print cmd
return ''.join(cmd[::-1])
ans = []
r = c = 0
for t in target:
vis = [[-1]*n for _ in range(m)]
end = bfs(r, c, t)
# print vis
path = find((r, c), end)
r, c = end
# print (r, c), t
ans.append(path)
# print ans
return ''.join(ans)
if __name__ == '__main__':
target = 'leet'
su = Solution()
    print(su.alphabetBoardPath(target)) | [
"[email protected]"
] | |
a344d7024b0846d0428ce64b15b6e3afecb52464 | 1ee910d6602123eb1328f56419b04e31b3761b6b | /bin/pilfile.py | 9958e72c9cff0d5f0c078db012a9bcb82355fa89 | [
"MIT"
] | permissive | mraza007/Pizza-or-Not-a-Pizza | 7fc89e0905c86fbd3c77a9cc834a4b6098912aeb | 6ad59d046adbd6be812c7403d9cb8ffbdbd6b0b8 | refs/heads/master | 2022-12-15T15:47:34.779838 | 2018-07-04T02:28:56 | 2018-07-04T02:28:56 | 127,992,302 | 30 | 4 | MIT | 2022-11-22T00:43:51 | 2018-04-04T01:56:26 | Python | UTF-8 | Python | false | false | 2,604 | py | #!/home/muhammad/image-recognition/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
Image.DEBUG += 1
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
| [
"[email protected]"
] | |
bee9bac253802fcac7d2c1eb66160555ca7defa8 | 3345eebefad6f4348f29cdec2f2d59a89c843861 | /mac/shop/migrations/0003_contact.py | ef5562ab9bfba669d4149c8dd469d07e2bea2681 | [] | no_license | AyushiiJain/My-Ecommerce-Website | fc8e9ccc2a106f2341e1fcb5b718679e2fd7b3bd | 9bdfc9bbd1c7d4573db2d9138b9996abe2f5c1ad | refs/heads/master | 2023-05-17T09:24:23.932158 | 2021-06-09T08:54:36 | 2021-06-09T08:54:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # Generated by Django 3.2.2 on 2021-05-11 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20210508_0633'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('msg_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('email', models.CharField(default='', max_length=70)),
('phone', models.CharField(default='', max_length=70)),
('desc', models.CharField(default='', max_length=500)),
],
),
]
| [
"[email protected]"
] | |
b8c8c14f81dd191c2eef1374e99d8ae9fd4ca9c4 | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/plistlib.py | 0a7bee5507f5fd3113ea27573283e2142f88e3e7 | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94,571 | py | <<<<<<< HEAD
<<<<<<< HEAD
r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The property list (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the dump(value, file)
function. 'value' is the top level object, 'file' is
a (writable) file object.
To parse a plist from a file, use the load(file) function,
with a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in bytes objects, you can use loads()
and dumps().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries (but only with string keys), Data, bytes, bytearray, or
datetime.datetime objects.
Generate Plist example:
pl = dict(
aString = "Doodah",
aList = ["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict = dict(
anotherString = "<hello & hi there!>",
aUnicodeValue = "M\xe4ssig, Ma\xdf",
aTrueValue = True,
aFalseValue = False,
),
someData = b"<binary gunk>",
someMoreData = b"<lots of binary gunk>" * 10,
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
with open(fileName, 'wb') as fp:
dump(pl, fp)
Parse Plist example:
with open(fileName, 'rb') as fp:
pl = load(fp)
print(pl["aKey"])
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromBytes", "writePlistToBytes",
"Plist", "Data", "Dict", "FMT_XML", "FMT_BINARY",
"load", "dump", "loads", "dumps"
]
import binascii
import codecs
import contextlib
import datetime
import enum
from io import BytesIO
import itertools
import os
import re
import struct
from warnings import warn
from xml.parsers.expat import ParserCreate
PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__)
globals().update(PlistFormat.__members__)
#
#
# Deprecated functionality
#
#
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
__slots__ = ()
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError(attr)
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", DeprecationWarning, 2)
return value
def __setattr__(self, attr, value):
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", DeprecationWarning, 2)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError(attr)
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", DeprecationWarning, 2)
class Dict(_InternalDict):
def __init__(self, **kwargs):
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
DeprecationWarning, 2)
super().__init__(**kwargs)
@contextlib.contextmanager
def _maybe_open(pathOrFile, mode):
if isinstance(pathOrFile, str):
with open(pathOrFile, mode) as fp:
yield fp
else:
yield pathOrFile
class Plist(_InternalDict):
"""This class has been deprecated. Use dump() and load()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
warn("The Plist class is deprecated, use the load() and "
"dump() functions instead", DeprecationWarning, 2)
super().__init__(**kwargs)
@classmethod
def fromFile(cls, pathOrFile):
"""Deprecated. Use the load() function instead."""
with _maybe_open(pathOrFile, 'rb') as fp:
value = load(fp)
plist = cls()
plist.update(value)
return plist
def write(self, pathOrFile):
"""Deprecated. Use the dump() function instead."""
with _maybe_open(pathOrFile, 'wb') as fp:
dump(self, fp)
def readPlist(pathOrFile):
"""
Read a .plist from a path or file. pathOrFile should either
be a file name, or a readable binary file object.
This function is deprecated, use load instead.
"""
warn("The readPlist function is deprecated, use load() instead",
DeprecationWarning, 2)
with _maybe_open(pathOrFile, 'rb') as fp:
return load(fp, fmt=None, use_builtin_types=False,
dict_type=_InternalDict)
def writePlist(value, pathOrFile):
"""
Write 'value' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
This function is deprecated, use dump instead.
"""
warn("The writePlist function is deprecated, use dump() instead",
DeprecationWarning, 2)
with _maybe_open(pathOrFile, 'wb') as fp:
dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False)
def readPlistFromBytes(data):
"""
Read a plist data from a bytes object. Return the root object.
This function is deprecated, use loads instead.
"""
warn("The readPlistFromBytes function is deprecated, use loads() instead",
DeprecationWarning, 2)
return load(BytesIO(data), fmt=None, use_builtin_types=False,
dict_type=_InternalDict)
def writePlistToBytes(value):
"""
Return 'value' as a plist-formatted bytes object.
This function is deprecated, use dumps instead.
"""
warn("The writePlistToBytes function is deprecated, use dumps() instead",
DeprecationWarning, 2)
f = BytesIO()
dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)
return f.getvalue()
class Data:
"""
Wrapper for binary data.
This class is deprecated, use a bytes object instead.
"""
def __init__(self, data):
if not isinstance(data, bytes):
raise TypeError("data must be as bytes")
self.data = data
@classmethod
def fromBase64(cls, data):
# base64.decodebytes just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(_decode_base64(data))
def asBase64(self, maxlinelength=76):
return _encode_base64(self.data, maxlinelength)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.data == other.data
elif isinstance(other, str):
return self.data == other
else:
return id(self) == id(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
#
#
# End of deprecated functionality
#
#
#
# XML support
#
# XML 'header'
PLISTHEADER = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _encode_base64(s, maxlinelength=76):
# copied from base64.encodebytes(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return b''.join(pieces)
def _decode_base64(s):
if isinstance(s, str):
return binascii.a2b_base64(s.encode("utf-8"))
else:
return binascii.a2b_base64(s)
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units
# may be omitted with # a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z", re.ASCII)
def _date_from_string(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _date_to_string(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
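# For example (illustrative round trip):
#   _date_from_string("2004-11-28T14:33:06Z") -> datetime.datetime(2004, 11, 28, 14, 33, 6)
#   _date_to_string(datetime.datetime(2004, 11, 28, 14, 33, 6)) -> "2004-11-28T14:33:06Z"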
def _escape(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use bytes instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text
class _PlistParser:
def __init__(self, use_builtin_types, dict_type):
self.stack = []
self.current_key = None
self.root = None
self._use_builtin_types = use_builtin_types
self._dict_type = dict_type
def parse(self, fileobj):
self.parser = ParserCreate()
self.parser.StartElementHandler = self.handle_begin_element
self.parser.EndElementHandler = self.handle_end_element
self.parser.CharacterDataHandler = self.handle_data
self.parser.ParseFile(fileobj)
return self.root
def handle_begin_element(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handle_end_element(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handle_data(self, data):
self.data.append(data)
def add_object(self, value):
if self.current_key is not None:
if not isinstance(self.stack[-1], type({})):
raise ValueError("unexpected element at line %d" %
self.parser.CurrentLineNumber)
self.stack[-1][self.current_key] = value
self.current_key = None
elif not self.stack:
# this is the root object
self.root = value
else:
if not isinstance(self.stack[-1], type([])):
raise ValueError("unexpected element at line %d" %
self.parser.CurrentLineNumber)
self.stack[-1].append(value)
def get_data(self):
data = ''.join(self.data)
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = self._dict_type()
self.add_object(d)
self.stack.append(d)
def end_dict(self):
if self.current_key:
raise ValueError("missing value for key '%s' at line %d" %
(self.current_key,self.parser.CurrentLineNumber))
self.stack.pop()
def end_key(self):
if self.current_key or not isinstance(self.stack[-1], type({})):
raise ValueError("unexpected key at line %d" %
self.parser.CurrentLineNumber)
self.current_key = self.get_data()
def begin_array(self, attrs):
a = []
self.add_object(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.add_object(True)
def end_false(self):
self.add_object(False)
def end_integer(self):
self.add_object(int(self.get_data()))
def end_real(self):
self.add_object(float(self.get_data()))
def end_string(self):
self.add_object(self.get_data())
def end_data(self):
if self._use_builtin_types:
self.add_object(_decode_base64(self.get_data()))
else:
self.add_object(Data.fromBase64(self.get_data()))
def end_date(self):
self.add_object(_date_from_string(self.get_data()))
class _DumbXMLWriter:
def __init__(self, file, indent_level=0, indent="\t"):
self.file = file
self.stack = []
self._indent_level = indent_level
self.indent = indent
def begin_element(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self._indent_level += 1
def end_element(self, element):
assert self._indent_level > 0
assert self.stack.pop() == element
self._indent_level -= 1
self.writeln("</%s>" % element)
def simple_element(self, element, value=None):
if value is not None:
value = _escape(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
# plist has fixed encoding of utf-8
# XXX: is this test needed?
if isinstance(line, str):
line = line.encode('utf-8')
self.file.write(self._indent_level * self.indent)
self.file.write(line)
self.file.write(b'\n')
class _PlistWriter(_DumbXMLWriter):
def __init__(
self, file, indent_level=0, indent=b"\t", writeHeader=1,
sort_keys=True, skipkeys=False):
if writeHeader:
file.write(PLISTHEADER)
_DumbXMLWriter.__init__(self, file, indent_level, indent)
self._sort_keys = sort_keys
self._skipkeys = skipkeys
def write(self, value):
self.writeln("<plist version=\"1.0\">")
self.write_value(value)
self.writeln("</plist>")
def write_value(self, value):
if isinstance(value, str):
self.simple_element("string", value)
elif value is True:
self.simple_element("true")
elif value is False:
self.simple_element("false")
elif isinstance(value, int):
if -1 << 63 <= value < 1 << 64:
self.simple_element("integer", "%d" % value)
else:
raise OverflowError(value)
elif isinstance(value, float):
self.simple_element("real", repr(value))
elif isinstance(value, dict):
self.write_dict(value)
elif isinstance(value, Data):
self.write_data(value)
elif isinstance(value, (bytes, bytearray)):
self.write_bytes(value)
elif isinstance(value, datetime.datetime):
self.simple_element("date", _date_to_string(value))
elif isinstance(value, (tuple, list)):
self.write_array(value)
else:
raise TypeError("unsupported type: %s" % type(value))
def write_data(self, data):
self.write_bytes(data.data)
def write_bytes(self, data):
self.begin_element("data")
self._indent_level -= 1
maxlinelength = max(
16,
76 - len(self.indent.replace(b"\t", b" " * 8) * self._indent_level))
for line in _encode_base64(data, maxlinelength).split(b"\n"):
if line:
self.writeln(line)
self._indent_level += 1
self.end_element("data")
def write_dict(self, d):
if d:
self.begin_element("dict")
if self._sort_keys:
items = sorted(d.items())
else:
items = d.items()
for key, value in items:
if not isinstance(key, str):
if self._skipkeys:
continue
raise TypeError("keys must be strings")
self.simple_element("key", key)
self.write_value(value)
self.end_element("dict")
else:
self.simple_element("dict")
def write_array(self, array):
if array:
self.begin_element("array")
for value in array:
self.write_value(value)
self.end_element("array")
else:
self.simple_element("array")
def _is_fmt_xml(header):
prefixes = (b'<?xml', b'<plist')
for pfx in prefixes:
if header.startswith(pfx):
return True
# Also check for alternative XML encodings, this is slightly
# overkill because the Apple tools (and plistlib) will not
# generate files with these encodings.
for bom, encoding in (
(codecs.BOM_UTF8, "utf-8"),
(codecs.BOM_UTF16_BE, "utf-16-be"),
(codecs.BOM_UTF16_LE, "utf-16-le"),
# expat does not support utf-32
#(codecs.BOM_UTF32_BE, "utf-32-be"),
#(codecs.BOM_UTF32_LE, "utf-32-le"),
):
if not header.startswith(bom):
continue
for start in prefixes:
prefix = bom + start.decode('ascii').encode(encoding)
if header[:len(prefix)] == prefix:
return True
return False
#
# Binary Plist
#
class InvalidFileException (ValueError):
def __init__(self, message="Invalid file"):
ValueError.__init__(self, message)
_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}
class _BinaryPlistParser:
"""
Read or write a binary plist file, following the description of the binary
format. Raise InvalidFileException in case of error, otherwise return the
root object.
see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
"""
def __init__(self, use_builtin_types, dict_type):
self._use_builtin_types = use_builtin_types
self._dict_type = dict_type
def parse(self, fp):
try:
# The basic file format:
# HEADER
# object...
# refid->offset...
# TRAILER
self._fp = fp
self._fp.seek(-32, os.SEEK_END)
trailer = self._fp.read(32)
if len(trailer) != 32:
raise InvalidFileException()
(
offset_size, self._ref_size, num_objects, top_object,
offset_table_offset
) = struct.unpack('>6xBBQQQ', trailer)
self._fp.seek(offset_table_offset)
self._object_offsets = self._read_ints(num_objects, offset_size)
return self._read_object(self._object_offsets[top_object])
except (OSError, IndexError, struct.error):
raise InvalidFileException()
def _get_size(self, tokenL):
""" return the size of the next object."""
if tokenL == 0xF:
m = self._fp.read(1)[0] & 0x3
s = 1 << m
f = '>' + _BINARY_FORMAT[s]
return struct.unpack(f, self._fp.read(s))[0]
return tokenL
def _read_ints(self, n, size):
data = self._fp.read(size * n)
if size in _BINARY_FORMAT:
return struct.unpack('>' + _BINARY_FORMAT[size] * n, data)
else:
return tuple(int.from_bytes(data[i: i + size], 'big')
for i in range(0, size * n, size))
def _read_refs(self, n):
return self._read_ints(n, self._ref_size)
def _read_object(self, offset):
"""
read the object at offset.
May recursively read sub-objects (content of an array/dict/set)
"""
self._fp.seek(offset)
token = self._fp.read(1)[0]
tokenH, tokenL = token & 0xF0, token & 0x0F
if token == 0x00:
return None
elif token == 0x08:
return False
elif token == 0x09:
return True
# The referenced source code also mentions URL (0x0c, 0x0d) and
# UUID (0x0e), but neither can be generated using the Cocoa libraries.
elif token == 0x0f:
return b''
elif tokenH == 0x10: # int
return int.from_bytes(self._fp.read(1 << tokenL),
'big', signed=tokenL >= 3)
elif token == 0x22: # real
return struct.unpack('>f', self._fp.read(4))[0]
elif token == 0x23: # real
return struct.unpack('>d', self._fp.read(8))[0]
elif token == 0x33: # date
f = struct.unpack('>d', self._fp.read(8))[0]
# timestamp 0 of binary plists corresponds to 1/1/2001
# (year of Mac OS X 10.0), instead of 1/1/1970.
return datetime.datetime.utcfromtimestamp(f + (31 * 365 + 8) * 86400)
elif tokenH == 0x40: # data
s = self._get_size(tokenL)
if self._use_builtin_types:
return self._fp.read(s)
else:
return Data(self._fp.read(s))
elif tokenH == 0x50: # ascii string
s = self._get_size(tokenL)
result = self._fp.read(s).decode('ascii')
return result
elif tokenH == 0x60: # unicode string
s = self._get_size(tokenL)
return self._fp.read(s * 2).decode('utf-16be')
# tokenH == 0x80 is documented as 'UID' and appears to be used for
# keyed-archiving, not in plists.
elif tokenH == 0xA0: # array
s = self._get_size(tokenL)
obj_refs = self._read_refs(s)
return [self._read_object(self._object_offsets[x])
for x in obj_refs]
# tokenH == 0xB0 is documented as 'ordset', but is not actually
# implemented in the Apple reference code.
# tokenH == 0xC0 is documented as 'set', but sets cannot be used in
# plists.
elif tokenH == 0xD0: # dict
s = self._get_size(tokenL)
key_refs = self._read_refs(s)
obj_refs = self._read_refs(s)
result = self._dict_type()
for k, o in zip(key_refs, obj_refs):
result[self._read_object(self._object_offsets[k])
] = self._read_object(self._object_offsets[o])
return result
raise InvalidFileException()
def _count_to_size(count):
if count < 1 << 8:
return 1
elif count < 1 << 16:
return 2
    elif count < 1 << 32:
return 4
else:
return 8
class _BinaryPlistWriter (object):
def __init__(self, fp, sort_keys, skipkeys):
self._fp = fp
self._sort_keys = sort_keys
self._skipkeys = skipkeys
def write(self, value):
# Flattened object list:
self._objlist = []
# Mappings from object->objectid
# First dict has (type(object), object) as the key,
# second dict is used when object is not hashable and
# has id(object) as the key.
self._objtable = {}
self._objidtable = {}
# Create list of all objects in the plist
self._flatten(value)
# Size of object references in serialized containers
# depends on the number of objects in the plist.
num_objects = len(self._objlist)
self._object_offsets = [0]*num_objects
self._ref_size = _count_to_size(num_objects)
self._ref_format = _BINARY_FORMAT[self._ref_size]
# Write file header
self._fp.write(b'bplist00')
# Write object list
for obj in self._objlist:
self._write_object(obj)
# Write refnum->object offset table
top_object = self._getrefnum(value)
offset_table_offset = self._fp.tell()
offset_size = _count_to_size(offset_table_offset)
offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects
self._fp.write(struct.pack(offset_format, *self._object_offsets))
# Write trailer
sort_version = 0
trailer = (
sort_version, offset_size, self._ref_size, num_objects,
top_object, offset_table_offset
)
self._fp.write(struct.pack('>5xBBBQQQ', *trailer))
def _flatten(self, value):
# First check if the object is in the object table, not used for
# containers to ensure that two subcontainers with the same contents
# will be serialized as distinct values.
if isinstance(value, (
str, int, float, datetime.datetime, bytes, bytearray)):
if (type(value), value) in self._objtable:
return
elif isinstance(value, Data):
if (type(value.data), value.data) in self._objtable:
return
# Add to objectreference map
refnum = len(self._objlist)
self._objlist.append(value)
try:
if isinstance(value, Data):
self._objtable[(type(value.data), value.data)] = refnum
else:
self._objtable[(type(value), value)] = refnum
except TypeError:
self._objidtable[id(value)] = refnum
# And finally recurse into containers
if isinstance(value, dict):
keys = []
values = []
items = value.items()
if self._sort_keys:
items = sorted(items)
for k, v in items:
if not isinstance(k, str):
if self._skipkeys:
continue
raise TypeError("keys must be strings")
keys.append(k)
values.append(v)
for o in itertools.chain(keys, values):
self._flatten(o)
elif isinstance(value, (list, tuple)):
for o in value:
self._flatten(o)
def _getrefnum(self, value):
try:
if isinstance(value, Data):
return self._objtable[(type(value.data), value.data)]
else:
return self._objtable[(type(value), value)]
except TypeError:
return self._objidtable[id(value)]
def _write_size(self, token, size):
if size < 15:
self._fp.write(struct.pack('>B', token | size))
elif size < 1 << 8:
self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size))
elif size < 1 << 16:
self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size))
elif size < 1 << 32:
self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size))
else:
self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size))
def _write_object(self, value):
ref = self._getrefnum(value)
self._object_offsets[ref] = self._fp.tell()
if value is None:
self._fp.write(b'\x00')
elif value is False:
self._fp.write(b'\x08')
elif value is True:
self._fp.write(b'\x09')
elif isinstance(value, int):
if value < 0:
try:
self._fp.write(struct.pack('>Bq', 0x13, value))
except struct.error:
raise OverflowError(value) from None
elif value < 1 << 8:
self._fp.write(struct.pack('>BB', 0x10, value))
elif value < 1 << 16:
self._fp.write(struct.pack('>BH', 0x11, value))
elif value < 1 << 32:
self._fp.write(struct.pack('>BL', 0x12, value))
elif value < 1 << 63:
self._fp.write(struct.pack('>BQ', 0x13, value))
elif value < 1 << 64:
self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True))
else:
raise OverflowError(value)
elif isinstance(value, float):
self._fp.write(struct.pack('>Bd', 0x23, value))
elif isinstance(value, datetime.datetime):
f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
self._fp.write(struct.pack('>Bd', 0x33, f))
elif isinstance(value, Data):
self._write_size(0x40, len(value.data))
self._fp.write(value.data)
elif isinstance(value, (bytes, bytearray)):
self._write_size(0x40, len(value))
self._fp.write(value)
elif isinstance(value, str):
try:
t = value.encode('ascii')
self._write_size(0x50, len(value))
except UnicodeEncodeError:
t = value.encode('utf-16be')
self._write_size(0x60, len(value))
self._fp.write(t)
elif isinstance(value, (list, tuple)):
refs = [self._getrefnum(o) for o in value]
s = len(refs)
self._write_size(0xA0, s)
self._fp.write(struct.pack('>' + self._ref_format * s, *refs))
elif isinstance(value, dict):
keyRefs, valRefs = [], []
if self._sort_keys:
rootItems = sorted(value.items())
else:
rootItems = value.items()
for k, v in rootItems:
if not isinstance(k, str):
if self._skipkeys:
continue
raise TypeError("keys must be strings")
keyRefs.append(self._getrefnum(k))
valRefs.append(self._getrefnum(v))
s = len(keyRefs)
self._write_size(0xD0, s)
self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs))
self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs))
else:
raise TypeError(value)
def _is_fmt_binary(header):
return header[:8] == b'bplist00'
#
# Generic bits
#
_FORMATS={
FMT_XML: dict(
detect=_is_fmt_xml,
parser=_PlistParser,
writer=_PlistWriter,
),
FMT_BINARY: dict(
detect=_is_fmt_binary,
parser=_BinaryPlistParser,
writer=_BinaryPlistWriter,
)
}
def load(fp, *, fmt=None, use_builtin_types=True, dict_type=dict):
"""Read a .plist file. 'fp' should be (readable) file object.
Return the unpacked root object (which usually is a dictionary).
"""
if fmt is None:
header = fp.read(32)
fp.seek(0)
for info in _FORMATS.values():
if info['detect'](header):
P = info['parser']
break
else:
raise InvalidFileException()
else:
P = _FORMATS[fmt]['parser']
p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
return p.parse(fp)
def loads(value, *, fmt=None, use_builtin_types=True, dict_type=dict):
"""Read a .plist file from a bytes object.
Return the unpacked root object (which usually is a dictionary).
"""
fp = BytesIO(value)
return load(
fp, fmt=fmt, use_builtin_types=use_builtin_types, dict_type=dict_type)
def dump(value, fp, *, fmt=FMT_XML, sort_keys=True, skipkeys=False):
"""Write 'value' to a .plist file. 'fp' should be a (writable)
file object.
"""
if fmt not in _FORMATS:
raise ValueError("Unsupported format: %r"%(fmt,))
writer = _FORMATS[fmt]["writer"](fp, sort_keys=sort_keys, skipkeys=skipkeys)
writer.write(value)
def dumps(value, *, fmt=FMT_XML, skipkeys=False, sort_keys=True):
"""Return a bytes object with the contents for a .plist file.
"""
fp = BytesIO()
dump(value, fp, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
return fp.getvalue()
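# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged round-trip example of the public API defined above: build a
# small property list, serialize it with dumps(), and read it back with loads()
# and load() in both XML and binary formats.  The sample values below are
# arbitrary placeholders chosen only for illustration.
if __name__ == '__main__':
    import datetime as _dt
    from io import BytesIO as _BytesIO
    _sample = {
        'aString': 'Doodah',
        'aList': ['A', 'B', 12, 32.1],
        'aDate': _dt.datetime(2015, 6, 1, 12, 0, 0),
        'someData': b'<binary gunk>',
    }
    for _fmt in (FMT_XML, FMT_BINARY):
        _blob = dumps(_sample, fmt=_fmt)
        # loads() and load() should both recover an equal dictionary.
        assert loads(_blob, fmt=_fmt) == _sample
        assert load(_BytesIO(_blob), fmt=_fmt) == _sample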
| [
"[email protected]"
] | |
2e67cfa924d6e66b5a606f3dd3c8371825938f80 | d12c4bb550c71bb3eaad5dbea099fa79d8001316 | /CodingNinjasOOPS/MethodOverrriding2.py | ba5f63e34530b739d0a728f0ae2216e44eaac359 | [] | no_license | nilesh7808/Coding-Ninjas-DSA-Programs | 8fdd980424e8b21026825ff5d444df7d70545629 | bfdc2bd9b317721d30b8109e6fb37c61bfc577e8 | refs/heads/master | 2023-04-29T02:54:01.247715 | 2021-05-22T03:34:34 | 2021-05-22T03:34:34 | 369,706,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py |
class vehicle:
    def __init__(self, color):
        self.color = color
    def print(self):
        print("The color of Car is:", self.color)
class Car(vehicle):
    def print(self):
        super().print()  # super() calls the overridden parent-class (vehicle) print() first
        print("This is Pretty Good ")
c = Car("Black")
c.print()
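# --- Illustrative extension (hypothetical; not part of the original exercise) ---
# For contrast, a second subclass that overrides print() WITHOUT calling super():
# the parent-class body is skipped entirely, so only the child's message appears.
# The class name Bike and its message are invented purely for this demo.
class Bike(vehicle):
    def print(self):
        # No super().print() here, so vehicle.print() never runs.
        print("Overridden completely: the vehicle's color message is not shown.")
b = Bike("Red")
b.print()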
"[email protected]"
] | |
bebb93feff90d5c700d2dac9c625f44c6b54a245 | 6732dce33ccc8d3912c7dd9bb5a029988586a649 | /samples/apps/search_organize_users.py | 0e3db637d8cf7cb4f2b9ef4ce7081cabd4862e0d | [
"Apache-2.0"
] | permissive | hamada2029/gdata-python3 | 8a0d3cb53b707b7ad2f826a486df254c813e7463 | c1028f6567b480908b90848523bebaf78e6b49f7 | refs/heads/master | 2021-01-22T12:53:28.196826 | 2014-11-30T07:05:30 | 2014-11-30T07:05:30 | 46,613,040 | 1 | 0 | null | 2015-11-21T11:44:20 | 2015-11-21T11:44:19 | null | UTF-8 | Python | false | false | 5,004 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search users with a given pattern and move to a new organization.
Sample to move users to a new organization based on a pattern using
the User Provisioning and Organization Provisioning APIs.
Usage:
$ python search_organize_users.py
"""
__author__ = 'Shraddha Gupta <[email protected]>'
from optparse import OptionParser
import re
from gdata.apps.client import AppsClient
from gdata.apps.organization.client import OrganizationUnitProvisioningClient
import gdata.gauth
BATCH_SIZE = 25
SCOPES = ('https://apps-apis.google.com/a/feeds/user/ '
'https://apps-apis.google.com/a/feeds/policies/')
USER_AGENT = 'SearchAndOrganizeUsers'
class SearchAndOrganizeUsers(object):
"""Search users with a pattern and move them to organization."""
def __init__(self, client_id, client_secret, domain):
"""Create a new SearchAndOrganizeUsers object configured for a domain.
Args:
client_id: [string] The clientId of the developer.
client_secret: [string] The clientSecret of the developer.
domain: [string] The domain on which the functions are to be performed.
"""
self.client_id = client_id
self.client_secret = client_secret
self.domain = domain
def AuthorizeClient(self):
"""Authorize the clients for making API requests."""
self.token = gdata.gauth.OAuth2Token(
client_id=self.client_id, client_secret=self.client_secret,
scope=SCOPES, user_agent=USER_AGENT)
uri = self.token.generate_authorize_url()
print('Please visit this URL to authorize the application:')
print(uri)
# Get the verification code from the standard input.
code = input('What is the verification code? ').strip()
self.token.get_access_token(code)
self.user_client = AppsClient(domain=self.domain, auth_token=self.token)
self.org_client = OrganizationUnitProvisioningClient(
domain=self.domain, auth_token=self.token)
def OrganizeUsers(self, customer_id, org_unit_path, pattern):
"""Find users with given pattern and move to an organization in batches.
Args:
customer_id: [string] customer_id to make calls to Organization API.
org_unit_path: [string] path of organization unit where users are moved
pattern: [regex object] regex to match with users
"""
users = self.user_client.RetrieveAllUsers()
matched_users = []
# Search the users that match given pattern
for user in users.entry:
if (pattern.search(user.login.user_name) or
pattern.search(user.name.given_name) or
pattern.search(user.name.family_name)):
user_email = '%s@%s' % (user.login.user_name, self.domain)
matched_users.append(user_email)
# Maximum BATCH_SIZE users can be moved at one time
# Split users into batches of BATCH_SIZE and move in batches
for i in range(0, len(matched_users), BATCH_SIZE):
batch_to_move = matched_users[i: i + BATCH_SIZE]
self.org_client.MoveUserToOrgUnit(customer_id,
org_unit_path, batch_to_move)
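# Worked example of the batching above (hypothetical numbers): with 60 matched
# users and BATCH_SIZE = 25, the loop issues three API calls covering the
# slices [0:25], [25:50] and [50:60].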
print(('Number of users moved = %d' % len(matched_users)))
def Run(self, org_unit_path, regex):
self.AuthorizeClient()
customer_id_entry = self.org_client.RetrieveCustomerId()
customer_id = customer_id_entry.customer_id
pattern = re.compile(regex)
print(('Moving Users with the pattern %s' % regex))
self.OrganizeUsers(customer_id, org_unit_path, pattern)
def main():
usage = 'Usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('--DOMAIN',
help='Google Apps Domain, e.g. "domain.com".')
parser.add_option('--CLIENT_ID',
help='Registered CLIENT_ID of Domain.')
parser.add_option('--CLIENT_SECRET',
help='Registered CLIENT_SECRET of Domain.')
parser.add_option('--ORG_UNIT_PATH',
help='Orgunit path of organization where to move users.')
parser.add_option('--PATTERN',
help='Pattern to search in users')
(options, args) = parser.parse_args()
if not (options.DOMAIN and options.CLIENT_ID and options.CLIENT_SECRET
and options.ORG_UNIT_PATH and options.PATTERN):
parser.print_help()
return
sample = SearchAndOrganizeUsers(options.CLIENT_ID, options.CLIENT_SECRET,
options.DOMAIN)
sample.Run(options.ORG_UNIT_PATH, options.PATTERN)
if __name__ == '__main__':
main()
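# Example invocation (all values below are placeholders, not real credentials):
#   python search_organize_users.py --DOMAIN=example.com --CLIENT_ID=xxxx \
#     --CLIENT_SECRET=yyyy --ORG_UNIT_PATH=/Sales --PATTERN='^temp_'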
| [
"[email protected]"
] | |
e815ef00e7e2a01d38ebb75253c4bf54a9f245ad | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/TileCalorimeter/TileExample/TileSimEx/scripts/makePlot.py | 769edfceb2d903852931704bb7f471b328dc6ce2 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,939 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
#-----------------------------------------------------
# Author: Dengfeng Zhang
# [email protected]
#-----------------------------------------------------
# This script has four functions:
# GetPlotSingleProperty(): Get energy response, energy resolution,
# lateral spread and longitudinal profile of MC.
# GetDataPlotSingleProperty(): Get energy response, energy resolution,
# lateral spread and longitudinal profile of data.
# ComDataMC(): Compare MC results with data results, get the ratio of MC to data.
# Draw all MC, data and their ratios on one plot.
#              (energy response and resolution, lateral spread and longitudinal profile);
# ComparePhysicsList(): Draw all MC results and data results on one plot, not ratios.
# (energy response and resolution, lateral spread)
#-----------------------------------------------------
import ROOT,math,os,array
from ROOT import *
gROOT.ProcessLine("#include \"GraphToolKit.h\"")
gROOT.ProcessLine("#include \"HistToolKit.h\"")
Energies = [20000, 50000, 100000, 180000] # beam energy lists
Particles = ['pi', 'pr'] # particle types
PhysicsLists = ['FTFP_BERT', 'FTFP_BERT_ATL', 'QGSP_BERT', "QGSP_BIC"] # physics lists
# Get the current working dir
Dir = os.getcwd()
# Check that the main output dir holding output root files exists in the current working dir;
# otherwise create it.
ResultDir = Dir+"/results/"
if ROOT.gSystem.AccessPathName(ResultDir):
print ResultDir, "doesn't exist! Making"
gSystem.Exec("mkdir {}".format(ResultDir))
# Check that the main output dir holding plots exists in the current working dir;
# otherwise create it.
PlotDir = Dir+"/plots/"
if ROOT.gSystem.AccessPathName(PlotDir):
print PlotDir, "doesn't exist! Making"
gSystem.Exec("mkdir {}".format(PlotDir))
# Get the energy response and resolution, lateral spread
# and longitudinal of each type of particles, each beam energy and each physics list.
def GetPlotSingleProperty():
for Particle in Particles: # loop over particle types
# input path containing root files generated in GetEnergy.cxx
InPath = ResultDir+"/{}/".format(Particle)
# create output root file
# grapherrors of renponse, resolution and lateral spread
# and histograms of longitudinal profile will be wrote in it.
outputFile = ROOT.TFile.Open('{}/Properities_{}.root'.format(InPath,Particle),'RECREATE')
for PhysicsList in PhysicsLists: # loop over physics lists
# define array or list of responses, resolutions,
# lateral spreads and longitudinal profiles of all beam energies
Response = array.array('f') # array of energy responses of all beam energies, for each particle type and physics list
ResponseError = array.array('f') # array of energy response errors (statistical only)
Resolution = array.array('f') # array of energy resolutions of all beam energies
ResolutionError = array.array('f') # array of energy resolution errors
LateralSpread = array.array('f') # array of lateral spreads of all beam energies
LateralSpreadError = array.array('f') # array of lateral spread errors of all beam energies
Es = array.array('f') # array of beam energies
EsError = array.array('f') # array of beam energy errors, always 0
LongitudinalProfileList = [] # list of longitudinal profiles of all beam energies
NormalizedLongitudinalProfileList = [] # list of normalized longitudinal profiles of all beam energies
for Energy in Energies: # loop over all beam energies
Es.append(Energy/1000.)
EsError.append(0.)
# get input file generated in GetEnergy.cxx
# attaced to each beam energy, particle and physics list
inputFile = ROOT.TFile('{}/tiletb90-E{}-{}_{}.root'.format(InPath,Energy,Particle,PhysicsList),"read")
if not inputFile:
continue
print "InFile: ",inputFile.GetName()
# get histograms in input file
h_E = inputFile.Get("RecoE") # total energy distribution
h_EM0 = inputFile.Get("RecoEModule0") # distribution of energy in barrel module 0
h_EB = inputFile.Get("RecoECentralModule") # distribution of energy in central barrel module
h_LP = inputFile.Get("LongitudinalProfile") # get the longitudinal profile
h_LP.SetDirectory(0)
# define a gaus fun to fit total energy distribution
func = ROOT.TF1("func","gaus",h_E.GetMean()-2*h_E.GetRMS(),h_E.GetMean()+2*h_E.GetRMS())
print h_E.GetMean()-2*h_E.GetRMS()," ", h_E.GetMean()+2*h_E.GetRMS()
h_E.Fit("func","R") # fit the total energy distribution by a Gaussian
gStyle.SetOptFit(1)
canvas = ROOT.TCanvas("canvas","",800,600)
h_E.Draw()
canvas.Print(ResultDir+'/{}/totalE_{}_{}_{}.pdf'.format(Particle,Particle,Energy,PhysicsList))
# energy response is the mean of the gaussian fitting/beam energy,
# energy resolution is sigma/mean of the gaussian fitting
Response.append(func.GetParameter(1)*1000/Energy)
ResponseError.append(func.GetParError(1)*1000/Energy)
Resolution.append(func.GetParameter(2)/func.GetParameter(1)*100)
ResolutionError.append(func.GetParError(2)/func.GetParameter(1)*100)
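# Worked example with hypothetical numbers (not taken from any real fit): for a
# 20 GeV beam (Energy = 20000 MeV), a fit mean of 16.9 GeV and sigma of 2.0 GeV
# give response = 16.9*1000/20000 = 0.845 and resolution = 2.0/16.9*100 ~ 11.8 %.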
# Get lateral spread(mean energy in module 0/ mean energy in central barrel)
LS = h_EM0.GetMean()/h_EB.GetMean()
LSError = LS*math.sqrt(pow(h_EM0.GetMeanError()/h_EM0.GetMean(), 2)+pow(h_EB.GetMeanError()/h_EB.GetMean(), 2))
LateralSpread.append(LS)
LateralSpreadError.append(LSError)
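# The uncertainty above is standard error propagation for a ratio R = A/B of
# independent quantities: sigma_R = R*sqrt((sigma_A/A)^2 + (sigma_B/B)^2),
# where A and B are the mean energies in module 0 and in the central barrel.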
# get the longitudinal profile scaled by the energy response
h_LP.Scale(1./(func.GetParameter(1)*1000/Energy)) #FIXME
#h_LP.Scale(Energy/1000/h_LP.Integral("width")) #FIXME
# get the normalized longitudinal profile (normalized to unit area)
h_NormalizedLP=h_LP.Clone()
h_NormalizedLP.SetDirectory(0)
h_NormalizedLP.Scale(1./h_LP.Integral("width"))
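# Sketch of the normalisation (hypothetical numbers): if the scaled profile
# integrates to 19.2 GeV (Integral("width") weights each bin by its bin width),
# dividing by 19.2 turns dE/dx [GeV/lambda] into a unit-area shape,
# i.e. 1/E_tot * dE/dx [1/lambda].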
h_LP.SetName("{}_{}GeV_{}_LongitudinalProfile".format(Particle,Energy/1000, PhysicsList))
h_LP.SetTitle("{} GeV".format(Energy/1000))
h_NormalizedLP.SetName("{}_{}GeV_{}_NormalizedLongitudinalProfile".format(Particle, Energy/1000, PhysicsList))
h_NormalizedLP.SetTitle("{} GeV".format(Energy/1000))
h_NormalizedLP.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
LongitudinalProfileList.append(h_LP)
NormalizedLongitudinalProfileList.append(h_NormalizedLP)
print LongitudinalProfileList, NormalizedLongitudinalProfileList
outputFile.cd()
# create the grapherrors of energy responses
gr_response = ROOT.TGraphErrors(len(Es),Es,Response,EsError,ResponseError)
gr_response.SetName("{}_{}_Response".format(Particle,PhysicsList))
gr_response.SetTitle("{} {} Response".format(Particle,PhysicsList))
gr_response.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_response.GetYaxis().SetTitle("E_{total}/E_{beam}")
# create the grapherrors of energy resolutions
gr_resolution = ROOT.TGraphErrors(len(Es),Es,Resolution,EsError,ResolutionError)
gr_resolution.SetName("{}_{}_Resolution".format(Particle,PhysicsList))
gr_resolution.SetTitle("{} {} Resolution".format(Particle,PhysicsList))
gr_resolution.GetYaxis().SetTitle("resolution[%]")
gr_resolution.GetXaxis().SetTitle("E_{beam}[GeV]")
# create the grapherrors of lateral spread
gr_lateralspread = ROOT.TGraphErrors(len(Es),Es,LateralSpread,EsError,LateralSpreadError)
gr_lateralspread.SetName("{}_{}_LateralSpread".format(Particle,PhysicsList))
gr_lateralspread.SetTitle("{} {} LateralSpread".format(Particle,PhysicsList))
gr_lateralspread.GetYaxis().SetTitle("E_{Module0}/E_{Barrel}")
gr_lateralspread.GetXaxis().SetTitle("E_{beam}[GeV]")
# set the x range of grapherrors of response and resolution
gr_response.GetXaxis().SetRangeUser(10, 210)
gr_response.GetYaxis().SetNdivisions(510)
gr_resolution.GetXaxis().SetRangeUser(10, 210)
gr_resolution.GetYaxis().SetNdivisions(510)
gr_lateralspread.GetXaxis().SetRangeUser(10, 210)
gr_lateralspread.GetYaxis().SetNdivisions(510)
# set the x range of grapherrors of lateral spread
if(Particle=="pi"):
gr_lateralspread.GetYaxis().SetRangeUser(0.025, 0.055)
gr_lateralspread.GetYaxis().SetNdivisions(503) ;
elif(Particle=="pr"):
gr_lateralspread.GetYaxis().SetRangeUser(0.025, 0.065)
gr_lateralspread.GetYaxis().SetNdivisions(504) ;
# define output for each particle type,
# if this dir doesn't exist, create it.
OutPath = PlotDir+"/{}/".format(Particle)
if ROOT.gSystem.AccessPathName(OutPath):
print OutPath, "doesn't exist! Making"
ROOT.gSystem.Exec("mkdir {}".format(OutPath))
FullParticleName=""
if Particle=='pi':
FullParticleName = "Pion"
elif Particle=='pr':
FullParticleName = "Proton"
# loop over beam energies to draw single longitudinal profile
for i, Energy in enumerate(Energies):
LongitudinalProfileList[i].Write()
NormalizedLongitudinalProfileList[i].Write()
# draw single plots of the longitudinal profile and the normalized longitudinal profile for each particle type, physics list and beam energy
DrawSingleHistOnCanvas(OutPath+LongitudinalProfileList[i].GetName(), LongitudinalProfileList[i], "PE", False, True, False, "#splitline{"+"{}GeV {}".format(Energy/1000, FullParticleName)+"}{"+"{}".format(PhysicsList)+"}")
DrawSingleHistOnCanvas(OutPath+NormalizedLongitudinalProfileList[i].GetName(), NormalizedLongitudinalProfileList[i], "PE",False, True, False, "#splitline{"+"{}GeV {}".format(Energy/1000, FullParticleName)+"}{"+"{}".format(PhysicsList)+"}")
LongitudinalProfileList[0].GetYaxis().SetRangeUser(1E-3, 100.)
NormalizedLongitudinalProfileList[0].GetYaxis().SetRangeUser(1E-5, 1.)
# Draw four longitudinal profiles of 4 beam energies of each type of particle and
# each physics list on same canvas
DrawFourHistsOnCanvas("{}/{}_{}_LongitudinalProfile_LogY".format(OutPath,Particle,PhysicsList), LongitudinalProfileList[0], LongitudinalProfileList[1], LongitudinalProfileList[2], LongitudinalProfileList[3],"pe", "pesame", "pesame", "pesame", False, True, False, FullParticleName, PhysicsList)
DrawFourHistsOnCanvas("{}/{}_{}_NormalizedLongitudinalProfile_LogY".format(OutPath,Particle,PhysicsList), NormalizedLongitudinalProfileList[0], NormalizedLongitudinalProfileList[1], NormalizedLongitudinalProfileList[2], NormalizedLongitudinalProfileList[3],"pe", "pesame", "pesame", "pesame", False, True, False, FullParticleName, PhysicsList)
# don't use logy on y axis
LongitudinalProfileList[0].GetYaxis().SetRangeUser(0., 40.)
NormalizedLongitudinalProfileList[0].GetYaxis().SetRangeUser(0., 0.25)
DrawFourHistsOnCanvas("{}/{}_{}_LongitudinalProfile".format(OutPath,Particle,PhysicsList), LongitudinalProfileList[0], LongitudinalProfileList[1], LongitudinalProfileList[2], LongitudinalProfileList[3],"pe", "pesame", "pesame", "pesame", False, False, False, FullParticleName, PhysicsList)
DrawFourHistsOnCanvas("{}/{}_{}_NormalizedLongitudinalProfile".format(OutPath,Particle,PhysicsList), NormalizedLongitudinalProfileList[0], NormalizedLongitudinalProfileList[1], NormalizedLongitudinalProfileList[2], NormalizedLongitudinalProfileList[3],"pe", "pesame", "pesame", "pesame", False, False, False, FullParticleName, PhysicsList)
# draw single grapherrors of responses , resolutions and lateral spread
DrawSingleGraphErrorsOnCanvas("{}/{}_{}_Response".format(OutPath,Particle,PhysicsList),gr_response,"AP", False, False, False, FullParticleName+" "+PhysicsList)
DrawSingleGraphErrorsOnCanvas("{}/{}_{}_Resolution".format(OutPath,Particle,PhysicsList),gr_resolution,"AP", False, False, False, FullParticleName+" "+PhysicsList)
DrawSingleGraphErrorsOnCanvas("{}/{}_{}_LateralSpread".format(OutPath,Particle,PhysicsList), gr_lateralspread,"AP", False, False, False, FullParticleName+" "+PhysicsList)
gr_response.Write()
gr_resolution.Write()
gr_lateralspread.Write()
print Response
print Resolution
print LateralSpread
outputFile.Write()
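# Illustrative sketch (hypothetical helper, not called anywhere in this script):
# the per-energy quantities filled above reduce to plain arithmetic like this,
# with the fit parameters in GeV and the beam energy in MeV as used above.
def _sketch_response_resolution(fit_mean_gev, fit_sigma_gev, beam_energy_mev):
    """Return (response, resolution in %) from a Gaussian fit of the total energy."""
    response = fit_mean_gev*1000./beam_energy_mev
    resolution = fit_sigma_gev/fit_mean_gev*100.
    return response, resolution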
# get the energy responses and resolutions, lateral spreads and longitudinal profiles of data
# data results are extracted from http://indico.cern.ch/event/135703/contributions/134036/attachments/108421/154300/main.pdf
def GetDataPlotSingleProperty():
# pion responses of data
PiResponse = array.array('f', [0.808, 0.844, 0.856, 0.867])
PiResponseError = array.array('f', [0., 0., 0., 0.])
# proton responses of data, proton only has 3 beam energies
PrResponse = array.array('f', [0.811, 0.83, 0.845])
PrResponseError = array.array('f', [0., 0., 0.])
# pion resolutions of data
PiResolution = array.array('f', [11.94, 8.92, 6.78, 6.02])
PiResolutionError = array.array('f', [0., 0., 0., 0.])
# proton resolutions of data, proton only has 3 beam energies
PrResolution = array.array('f', [8.63, 5.97, 5.16])
PrResolutionError = array.array('f', [0., 0., 0.])
# pion lateral spreads of data
PiLateralSpread = array.array('f', [0.044, 0.0379, 0.0342, 0.034])
PiLateralSpreadError = array.array('f', [0., 0., 0., 0.])
PrLateralSpread = array.array('f', [0.045, 0.0403, 0.0396])
PrLateralSpreadError = array.array('f', [0., 0., 0.])
# pion has four beam energies
PiEs = array.array('f', [20., 50., 100., 180.])
PiEsError = array.array('f', [0., 0., 0., 0.])
# Be careful that proton only has three beam energies
PrEs = array.array('f', [50., 100., 180.])
PrEsError = array.array('f', [0., 0., 0.])
# pion longitudinal profiles of data
PiLongitudinalProfile20GeV = array.array('f',[4.88076, 4.29345, 1.90255, 0.760799, 0.336904, 0.116429, 0.0472258, 0.0212191, 0.010869])
PiLongitudinalProfileError20GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0])
PiLongitudinalProfile50GeV = array.array('f',[10.1243, 10.3069, 5.44077, 2.55502, 1.18216, 0.486682, 0.197446, 0.0913368, 0.0474821, 0.0181673, 0.00878025])
PiLongitudinalProfileError50GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
PiLongitudinalProfile100GeV = array.array('f',[16.6323,21.0755,12.1435,6.13442,3.14342,1.37201,0.625483,0.31123,0.143954,0.0619092,0.022023,0.0199365])
PiLongitudinalProfileError100GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
PiLongitudinalProfile180GeV = array.array('f',[28.1277,37.7873,21.7727,11.4903,6.33449,2.88857,1.31695,0.655294,0.303115,0.140209,0.0739654,0.0318035,0.0145007])
PiLongitudinalProfileError180GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0])
# proton longitudinal profiles of data
PrLongitudinalProfile50GeV = array.array('f',[10.2289,10.3627,5.51951,2.54066,1.16948,0.472035,0.174547,0.0747019,0.0310458,0.0099195,0.0043075])
PrLongitudinalProfileError50GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
PrLongitudinalProfile100GeV = array.array('f',[18.3511,21.2032,11.4597,5.51097,2.61195,1.039,0.431832,0.193063,0.0814251,0.0364116,0.00962173,0.00783076])
PrLongitudinalProfileError100GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
PrLongitudinalProfile180GeV = array.array('f',[30.1568,39.1626,21.7967,10.7928,5.42299,2.2868,0.978724,0.437566,0.198557,0.0813227,0.0256083,0.0114493,0.00382185])
PrLongitudinalProfileError180GeV = array.array('f',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# define output dir of root files and plots,
# if they don't exist, create them.
OutPathResult = ResultDir+"/data/"
OutPathPlot = PlotDir+"/data/"
if ROOT.gSystem.AccessPathName(OutPathResult):
print OutPathResult, "doesn't exist! Making"
ROOT.gSystem.Exec("mkdir {}".format(OutPathResult))
if ROOT.gSystem.AccessPathName(OutPathPlot):
print OutPathPlot, "doesn't exist! Making"
ROOT.gSystem.Exec("mkdir {}".format(OutPathPlot))
# create out file
outputFile = ROOT.TFile.Open('{}/data.root'.format(OutPathResult),'RECREATE')
# create out grapherrors of response, resolution and lateral spreads
gr_piresponse = ROOT.TGraphErrors(len(PiEs),PiEs,PiResponse,PiEsError,PiResponseError)
gr_piresponse.SetName("pi_Response")
gr_piresponse.SetTitle("Pion")
gr_piresponse.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_piresponse.GetYaxis().SetTitle("E_{total}/E_{beam}")
gr_prresponse = ROOT.TGraphErrors(len(PrEs),PrEs,PrResponse,PrEsError,PrResponseError)
gr_prresponse.SetName("pr_Response")
gr_prresponse.SetTitle("Proton")
gr_prresponse.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_prresponse.GetYaxis().SetTitle("E_{total}/E_{beam}")
gr_piresolution = ROOT.TGraphErrors(len(PiEs),PiEs,PiResolution,PiEsError,PiResolutionError)
gr_piresolution.SetName("pi_Resolution")
gr_piresolution.SetTitle("Pion")
gr_piresolution.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_piresolution.GetYaxis().SetTitle("resolution[%]")
gr_prresolution = ROOT.TGraphErrors(len(PrEs),PrEs,PrResolution,PrEsError,PrResolutionError)
gr_prresolution.SetName("pr_Resolution")
gr_prresolution.SetTitle("Proton")
gr_prresolution.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_prresolution.GetYaxis().SetTitle("resolution[%]")
gr_pilateralspread = ROOT.TGraphErrors(len(PiEs),PiEs,PiLateralSpread,PiEsError,PiLateralSpreadError)
gr_pilateralspread.SetName("pi_LateralSpread")
gr_pilateralspread.SetTitle("Pion")
gr_pilateralspread.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_pilateralspread.GetYaxis().SetTitle("E_{Module0}/E_{Barrel}")
gr_prlateralspread = ROOT.TGraphErrors(len(PrEs),PrEs,PrLateralSpread,PrEsError,PrLateralSpreadError)
gr_prlateralspread.SetName("pr_LateralSpread")
gr_prlateralspread.SetTitle("Proton")
gr_prlateralspread.GetXaxis().SetTitle("E_{beam}[GeV]")
gr_prlateralspread.GetYaxis().SetTitle("E_{Module0}/E_{Barrel}")
NBCells=13 # only use 13 of 18 B cells
# bin edges of longitudinal profiles histograms
xBLow = array.array('f',[0.119333,1.67226,3.44703,5.0887,6.686,8.15019,9.61438,10.9898,12.3582,13.7407,15.1233,16.4916,17.8671,19.3313])
# create longitudinal profiles histograms for all particles and beam energies
pi_LongitudinalProfile20GeV = bookTH1F("pi_LongitudinalProfile_20GeV", "20 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pi_LongitudinalProfile50GeV = bookTH1F("pi_LongitudinalProfile_50GeV", "50 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pi_LongitudinalProfile100GeV = bookTH1F("pi_LongitudinalProfile_100GeV", "100 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pi_LongitudinalProfile180GeV = bookTH1F("pi_LongitudinalProfile_180GeV", "180 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pr_LongitudinalProfile50GeV = bookTH1F("pr_LongitudinalProfile_50GeV", "50 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pr_LongitudinalProfile100GeV = bookTH1F("pr_LongitudinalProfile_100GeV", "100 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
pr_LongitudinalProfile180GeV = bookTH1F("pr_LongitudinalProfile_180GeV", "180 GeV", "x[#lambda]", "dE/dx[GeV/#lambda]", NBCells, xBLow)
# fill longitudinal profile histograms
for i in range(len(PiLongitudinalProfile20GeV)):
pi_LongitudinalProfile20GeV.SetBinContent(i+1,PiLongitudinalProfile20GeV[i])
pi_LongitudinalProfile20GeV.SetBinError(i+1,PiLongitudinalProfileError20GeV[i])
for i in range(len(PiLongitudinalProfile50GeV)):
pi_LongitudinalProfile50GeV.SetBinContent(i+1,PiLongitudinalProfile50GeV[i])
pi_LongitudinalProfile50GeV.SetBinError(i+1,PiLongitudinalProfileError50GeV[i])
for i in range(len(PiLongitudinalProfile100GeV)):
pi_LongitudinalProfile100GeV.SetBinContent(i+1,PiLongitudinalProfile100GeV[i])
pi_LongitudinalProfile100GeV.SetBinError(i+1,PiLongitudinalProfileError100GeV[i])
for i in range(len(PiLongitudinalProfile180GeV)):
pi_LongitudinalProfile180GeV.SetBinContent(i+1,PiLongitudinalProfile180GeV[i])
pi_LongitudinalProfile180GeV.SetBinError(i+1,PiLongitudinalProfileError180GeV[i])
for i in range(len(PrLongitudinalProfile50GeV)):
pr_LongitudinalProfile50GeV.SetBinContent(i+1,PrLongitudinalProfile50GeV[i])
pr_LongitudinalProfile50GeV.SetBinError(i+1,PrLongitudinalProfileError50GeV[i])
for i in range(len(PrLongitudinalProfile100GeV)):
pr_LongitudinalProfile100GeV.SetBinContent(i+1,PrLongitudinalProfile100GeV[i])
pr_LongitudinalProfile100GeV.SetBinError(i+1,PrLongitudinalProfileError100GeV[i])
for i in range(len(PrLongitudinalProfile180GeV)):
pr_LongitudinalProfile180GeV.SetBinContent(i+1,PrLongitudinalProfile180GeV[i])
pr_LongitudinalProfile180GeV.SetBinError(i+1,PrLongitudinalProfileError180GeV[i])
# get the normalized longitudinal profiles
pi_NormalizedLongitudinalProfile20GeV=pi_LongitudinalProfile20GeV.Clone()
pi_NormalizedLongitudinalProfile20GeV.Scale(1./pi_LongitudinalProfile20GeV.Integral("width"))
pi_NormalizedLongitudinalProfile20GeV.SetName("pi_NormalizedLongitudinalProfile20GeV")
pi_NormalizedLongitudinalProfile20GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pi_NormalizedLongitudinalProfile50GeV=pi_LongitudinalProfile50GeV.Clone()
pi_NormalizedLongitudinalProfile50GeV.Scale(1./pi_LongitudinalProfile50GeV.Integral("width"))
pi_NormalizedLongitudinalProfile50GeV.SetName("pi_NormalizedLongitudinalProfile50GeV")
pi_NormalizedLongitudinalProfile50GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pi_NormalizedLongitudinalProfile100GeV=pi_LongitudinalProfile100GeV.Clone()
pi_NormalizedLongitudinalProfile100GeV.Scale(1./pi_LongitudinalProfile100GeV.Integral("width"))
pi_NormalizedLongitudinalProfile100GeV.SetName("pi_NormalizedLongitudinalProfile100GeV")
pi_NormalizedLongitudinalProfile100GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pi_NormalizedLongitudinalProfile180GeV=pi_LongitudinalProfile180GeV.Clone()
pi_NormalizedLongitudinalProfile180GeV.Scale(1./pi_LongitudinalProfile180GeV.Integral("width"))
pi_NormalizedLongitudinalProfile180GeV.SetName("pi_NormalizedLongitudinalProfile180GeV")
pi_NormalizedLongitudinalProfile180GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pr_NormalizedLongitudinalProfile50GeV=pr_LongitudinalProfile50GeV.Clone()
pr_NormalizedLongitudinalProfile50GeV.Scale(1./pr_LongitudinalProfile50GeV.Integral("width"))
pr_NormalizedLongitudinalProfile50GeV.SetName("pr_NormalizedLongitudinalProfile50GeV")
pr_NormalizedLongitudinalProfile50GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pr_NormalizedLongitudinalProfile100GeV=pr_LongitudinalProfile100GeV.Clone()
pr_NormalizedLongitudinalProfile100GeV.Scale(1./pr_LongitudinalProfile100GeV.Integral("width"))
pr_NormalizedLongitudinalProfile100GeV.SetName("pr_NormalizedLongitudinalProfile100GeV")
pr_NormalizedLongitudinalProfile100GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
pr_NormalizedLongitudinalProfile180GeV=pr_LongitudinalProfile180GeV.Clone()
pr_NormalizedLongitudinalProfile180GeV.Scale(1./pr_LongitudinalProfile180GeV.Integral("width"))
pr_NormalizedLongitudinalProfile180GeV.SetName("pr_NormalizedLongitudinalProfile180GeV")
pr_NormalizedLongitudinalProfile180GeV.GetYaxis().SetTitle("1/E_{tot}#timesdE/dx[1/#lambda]")
# draw single plots of response, resolution and lateral spread
DrawSingleGraphErrorsOnCanvas("{}/pi_LateralSpread".format(OutPathPlot), gr_pilateralspread,"AP", False, False, False)
DrawSingleGraphErrorsOnCanvas("{}/pi_Response".format(OutPathPlot), gr_piresponse,"AP",False, False, False)
DrawSingleGraphErrorsOnCanvas("{}/pi_Resolution".format(OutPathPlot), gr_piresolution,"AP", False, False, False)
DrawSingleGraphErrorsOnCanvas("{}/pr_LateralSpread".format(OutPathPlot), gr_pilateralspread,"AP", False, False, False)
DrawSingleGraphErrorsOnCanvas("{}/pr_Resolution".format(OutPathPlot), gr_prresolution,"AP")
# draw response, resolution and lateral spread of pion and proton on the same canvas
DrawTwoGraphErrorsOnCanvas("{}/pipr_Resolution".format(OutPathPlot), gr_piresolution, gr_prresolution,"AP", "AP", False, False, False)
DrawTwoGraphErrorsOnCanvas("{}/pipr_Response".format(OutPathPlot), gr_piresponse, gr_prresponse,"AP", "AP", False, False, False)
DrawTwoGraphErrorsOnCanvas("{}/pipr_LateralSpread".format(OutPathPlot), gr_pilateralspread, gr_prlateralspread,"AP", "AP", False, False, False)
# draw single longitudinal profile of each beam energy
DrawSingleHistOnCanvas("{}/pi_LongitudinalProfile20GeV".format(OutPathPlot),pi_LongitudinalProfile20GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_LongitudinalProfile50GeV".format(OutPathPlot),pi_LongitudinalProfile50GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_LongitudinalProfile100GeV".format(OutPathPlot),pi_LongitudinalProfile100GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_LongitudinalProfile180GeV".format(OutPathPlot),pi_LongitudinalProfile180GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_NormalizedLongitudinalProfile20GeV".format(OutPathPlot),pi_LongitudinalProfile20GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_NormalizedLongitudinalProfile50GeV".format(OutPathPlot),pi_LongitudinalProfile50GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_NormalizedLongitudinalProfile100GeV".format(OutPathPlot),pi_LongitudinalProfile100GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pi_NormalizedLongitudinalProfile180GeV".format(OutPathPlot),pi_LongitudinalProfile180GeV, "PE", False, True, False)
# draw 4 longitudinal profiles of pions of 4 beam energies on same canvas
pi_LongitudinalProfile20GeV.GetYaxis().SetRangeUser(1E-3, 100.)
pi_NormalizedLongitudinalProfile20GeV.GetYaxis().SetRangeUser(1E-5, 1.)
DrawFourHistsOnCanvas("{}/pi_LongitudinalProfile_LogY".format(OutPathPlot),pi_LongitudinalProfile20GeV,pi_LongitudinalProfile50GeV,pi_LongitudinalProfile100GeV,pi_LongitudinalProfile180GeV,"pe", "pesame", "pesame", "pesame", False, True, False, "Pion")
DrawFourHistsOnCanvas("{}/pi_NormalizedLongitudinalProfile_LogY".format(OutPathPlot),pi_NormalizedLongitudinalProfile20GeV,pi_NormalizedLongitudinalProfile50GeV,pi_NormalizedLongitudinalProfile100GeV,pi_NormalizedLongitudinalProfile180GeV,"pe", "pesame", "pesame", "pesame", False, True, False, "Pion")
pi_LongitudinalProfile20GeV.GetYaxis().SetRangeUser(0., 40.)
pi_NormalizedLongitudinalProfile20GeV.GetYaxis().SetRangeUser(0., 0.25)
DrawFourHistsOnCanvas("{}/pi_LongitudinalProfile".format(OutPathPlot),pi_LongitudinalProfile20GeV,pi_LongitudinalProfile50GeV,pi_LongitudinalProfile100GeV,pi_LongitudinalProfile180GeV,"pe", "pesame", "pesame", "pesame", False, False, False, "Pion")
DrawFourHistsOnCanvas("{}/pi_NormalizedLongitudinalProfile".format(OutPathPlot),pi_NormalizedLongitudinalProfile20GeV,pi_NormalizedLongitudinalProfile50GeV,pi_NormalizedLongitudinalProfile100GeV,pi_NormalizedLongitudinalProfile180GeV,"pe", "pesame", "pesame", "pesame", False, False, False, "Pion")
# draw single longitudinal profiles of protons for each beam energy
DrawSingleHistOnCanvas("{}/pr_LongitudinalProfile50GeV".format(OutPathPlot),pr_LongitudinalProfile50GeV,"PE", False, True, False)
DrawSingleHistOnCanvas("{}/pr_LongitudinalProfile100GeV".format(OutPathPlot),pr_LongitudinalProfile100GeV, "PE", False, True, False)
DrawSingleHistOnCanvas("{}/pr_LongitudinalProfile180GeV".format(OutPathPlot),pr_LongitudinalProfile180GeV, "PE", False, True, False)
# draw 3 longitudinal profiles of proton of 3 beam energies on same canvas
pr_LongitudinalProfile50GeV.GetYaxis().SetRangeUser(1E-3, 100.)
pr_NormalizedLongitudinalProfile50GeV.GetYaxis().SetRangeUser(1E-5, 1.)
DrawThreeHistsOnCanvas("{}/pr_LongitudinalProfile_LogY".format(OutPathPlot),pr_LongitudinalProfile50GeV, pr_LongitudinalProfile100GeV, pr_LongitudinalProfile180GeV, "pe", "pesame", "pesame", False, True, False, "Proton")
DrawThreeHistsOnCanvas("{}/pr_NormalizedLongitudinalProfile_LogY".format(OutPathPlot),pr_NormalizedLongitudinalProfile50GeV, pr_NormalizedLongitudinalProfile100GeV, pr_NormalizedLongitudinalProfile180GeV, "pe", "pesame", "pesame", False, True, False, "Proton")
pr_LongitudinalProfile50GeV.GetYaxis().SetRangeUser(0., 40.)
pr_NormalizedLongitudinalProfile50GeV.GetYaxis().SetRangeUser(0., 0.25)
DrawThreeHistsOnCanvas("{}/pr_LongitudinalProfile".format(OutPathPlot),pr_LongitudinalProfile50GeV, pr_LongitudinalProfile100GeV, pr_LongitudinalProfile180GeV, "pe", "pesame", "pesame", False, False, False, "Proton")
DrawThreeHistsOnCanvas("{}/pr_NormalizedLongitudinalProfile".format(OutPathPlot),pr_NormalizedLongitudinalProfile50GeV, pr_NormalizedLongitudinalProfile100GeV, pr_NormalizedLongitudinalProfile180GeV, "pe", "pesame", "pesame", False, False, False, "Proton")
# save
gr_piresponse.Write()
gr_piresolution.Write()
gr_pilateralspread.Write()
gr_prresponse.Write()
gr_prresolution.Write()
gr_prlateralspread.Write()
pi_LongitudinalProfile20GeV.Write()
pi_LongitudinalProfile50GeV.Write()
pi_LongitudinalProfile100GeV.Write()
pi_LongitudinalProfile180GeV.Write()
pi_NormalizedLongitudinalProfile20GeV.Write()
pi_NormalizedLongitudinalProfile50GeV.Write()
pi_NormalizedLongitudinalProfile100GeV.Write()
pi_NormalizedLongitudinalProfile180GeV.Write()
pr_LongitudinalProfile50GeV.Write()
pr_LongitudinalProfile100GeV.Write()
pr_LongitudinalProfile180GeV.Write()
pr_NormalizedLongitudinalProfile50GeV.Write()
pr_NormalizedLongitudinalProfile100GeV.Write()
pr_NormalizedLongitudinalProfile180GeV.Write()
outputFile.Write()
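# Illustrative sketch (hypothetical helper, not used by this script): the
# bin-by-bin behaviour of TH1::Divide relied on in ComDataMC() below,
# assuming uncorrelated errors; relies on the module-level "import math".
def _sketch_ratio_with_error(mc, mc_err, data, data_err):
    """Return (mc/data, propagated error) for one bin."""
    ratio = mc/data
    error = ratio*math.sqrt((mc_err/mc)**2 + (data_err/data)**2)
    return ratio, error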
# compare the mc results with data
def ComDataMC():
for Particle in Particles: # loop over particles
inputFile = ROOT.TFile.Open('{}/{}/Properities_{}.root'.format(ResultDir,Particle,Particle)) # input file generated in GetPlotSingleProperty(), contain all MC results
inputFile2 = ROOT.TFile.Open('{}/data/data.root'.format(ResultDir)) # input file generated in GetDataPlotSingleProperty(), contain all Data results
if not inputFile:
continue
outputFile = ROOT.TFile.Open('{}/{}/{}_Ratio.root'.format(ResultDir, Particle, Particle),'RECREATE') # out file to store rations of MC to data
ResponseList = [] # list of responses of all physics lists
ResolutionList = [] # list of resolutions of all physics lists
LateralSpreadList = [] # list of lateral spreads of all physics lists
ResponseRatioList = [] # list of ratios of MC responses (all physics lists) to data
ResolutionRatioList = [] # list of ratios of MC resolutions to data
LateralSpreadRatioList = [] # list of ratios of MC lateral spreads to data
# get grapherrors from data file
ger_dataresponse = inputFile2.Get("{}_Response".format(Particle))
ger_dataresolution = inputFile2.Get("{}_Resolution".format(Particle))
ger_datalateralspread = inputFile2.Get("{}_LateralSpread".format(Particle))
# list of profiles of all beam energies
datalongitudinalprofilelist = []
datanormalizedlongitudinalprofilelist = []
# list of profiles of MC
mclongitudinalprofilelists = []
mcnormalizedlongitudinalprofilelists = []
# list of ratios of profiles of MCs to data
longitudinalprofileratiolists = []
normalizedlongitudinalprofileratiolists = []
# loop over the beam energies to get all profiles of this particle of data
for Energy in Energies:
# proton doesn't have a 20 GeV beam energy point
if Particle=='pr' and Energy==20000: continue
datalongitudinalprofilelist.append(inputFile2.Get("{}_LongitudinalProfile_{}GeV".format(Particle, Energy/1000)))
datanormalizedlongitudinalprofilelist.append(inputFile2.Get("{}_NormalizedLongitudinalProfile{}GeV".format(Particle, Energy/1000)))
# loop over physics lists,
# to get all responses, resolutions and lateral spreads of each physics list.
for PhysicsList in PhysicsLists:
ger_mcresponse = inputFile.Get("{}_{}_Response".format(Particle, PhysicsList))
ger_mcresponse.SetTitle(PhysicsList)
ger_mcresolution = inputFile.Get("{}_{}_Resolution".format(Particle, PhysicsList))
ger_mcresolution.SetTitle(PhysicsList)
ger_mclateralspread = inputFile.Get("{}_{}_LateralSpread".format(Particle, PhysicsList))
ger_mclateralspread.SetTitle(PhysicsList)
ResponseList.append(ger_mcresponse)
ResolutionList.append(ger_mcresolution)
LateralSpreadList.append(ger_mclateralspread)
N = ger_dataresponse.GetN()
# create histograms of responses, resolutions and lateral spreads of data,
# which the corresponding MC histograms are divided by.
# number of bins = number of points in corresponding grapherrors.
h_data_response = ROOT.TH1F("h_data_response","data",N, 0, N) ;
h_data_resolution = ROOT.TH1F("h_data_resolution","data",N, 0, N) ;
h_data_lateralspread = ROOT.TH1F("h_data_lateralspread","data",N, 0, N) ;
Xs = ger_dataresponse.GetX()
Xerrors = ger_dataresponse.GetEX()
dataresponses = ger_dataresponse.GetY()
dataresolutions = ger_dataresolution.GetY()
datalateralspreads = ger_datalateralspread.GetY()
# fill the point values to histograms
for i in range(N):
h_data_response.SetBinContent(i+1, dataresponses[i])
h_data_response.SetBinError(i+1, ger_dataresponse.GetErrorY(i))
h_data_resolution.SetBinContent(i+1, dataresolutions[i])
h_data_resolution.SetBinError(i+1, ger_dataresolution.GetErrorY(i))
h_data_lateralspread.SetBinContent(i+1, datalateralspreads[i])
h_data_lateralspread.SetBinError(i+1, ger_datalateralspread.GetErrorY(i))
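# (The graph points are copied into equal-width TH1F bins above so that
# TH1::Divide can be used for the MC/data ratios computed below; the bin
# centres themselves carry no physical meaning.)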
# create histograms of responses, resolutions and lateral spreads of MC.
h_mc_response = ROOT.TH1F("h_mc_response","",N, 0, N) ;
h_mc_resolution = ROOT.TH1F("h_mc_resolution","",N, 0, N) ;
h_mc_lateralspread = ROOT.TH1F("h_mc_lateralspread","",N, 0, N) ;
mcresponses = ger_mcresponse.GetY()
mcresolutions = ger_mcresolution.GetY()
mclateralspreads = ger_mclateralspread.GetY()
for i in range(N):
if Particle=="pr":
# proton doesn't have 20 GeV, so skip the first point in the MC grapherrors
h_mc_response.SetBinContent(i+1, mcresponses[i+1])
h_mc_response.SetBinError(i+1, ger_mcresponse.GetErrorY(i+1))
h_mc_resolution.SetBinContent(i+1, mcresolutions[i+1])
h_mc_resolution.SetBinError(i+1, ger_mcresolution.GetErrorY(i+1))
h_mc_lateralspread.SetBinContent(i+1, mclateralspreads[i+1])
h_mc_lateralspread.SetBinError(i+1, ger_mclateralspread.GetErrorY(i+1))
elif Particle=="pi":
h_mc_response.SetBinContent(i+1, mcresponses[i])
h_mc_response.SetBinError(i+1, ger_mcresponse.GetErrorY(i))
h_mc_resolution.SetBinContent(i+1, mcresolutions[i])
h_mc_resolution.SetBinError(i+1, ger_mcresolution.GetErrorY(i))
h_mc_lateralspread.SetBinContent(i+1, mclateralspreads[i])
h_mc_lateralspread.SetBinError(i+1, ger_mclateralspread.GetErrorY(i))
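# (The MC graphs were filled for all four beam energies, while the proton data
# graphs only have 50/100/180 GeV, hence the i+1 offset in the "pr" branch above.)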
# divide two hists to get the ratios
h_response_ratio = h_mc_response.Clone()
h_response_ratio.Divide(h_data_response)
h_resolution_ratio = h_mc_resolution.Clone()
h_resolution_ratio.Divide(h_data_resolution)
h_lateralspread_ratio = h_mc_lateralspread.Clone()
h_lateralspread_ratio.Divide(h_data_lateralspread)
# create grapherrors of ratios
ger_response_ratio = ROOT.TGraphErrors()
ger_response_ratio.SetName("{}_{}_Response_Ratio".format(Particle, PhysicsList))
ger_response_ratio.SetTitle(PhysicsList)
ger_resolution_ratio = ROOT.TGraphErrors()
ger_resolution_ratio.SetName("{}_{}_Resolution_Ratio".format(Particle, PhysicsList))
ger_resolution_ratio.SetTitle(PhysicsList)
ger_lateralspread_ratio = ROOT.TGraphErrors()
ger_lateralspread_ratio.SetName("{}_{}_LateralSpread_Ratio".format(Particle, PhysicsList))
ger_lateralspread_ratio.SetTitle(PhysicsList)
# set point values of grapherrors of ratios
for i in range(N):
ger_response_ratio.SetPoint(i, Xs[i], h_response_ratio.GetBinContent(i+1))
ger_response_ratio.SetPointError(i, Xerrors[i], h_response_ratio.GetBinError(i+1))
ger_resolution_ratio.SetPoint(i, Xs[i], h_resolution_ratio.GetBinContent(i+1))
ger_resolution_ratio.SetPointError(i, Xerrors[i], h_resolution_ratio.GetBinError(i+1))
ger_lateralspread_ratio.SetPoint(i, Xs[i], h_lateralspread_ratio.GetBinContent(i+1))
ger_lateralspread_ratio.SetPointError(i, Xerrors[i], h_lateralspread_ratio.GetBinError(i+1))
ger_response_ratio.GetXaxis().SetTitle("E_{beam}[GeV]")
ger_response_ratio.GetYaxis().SetTitle("MC/Data")
ger_resolution_ratio.GetXaxis().SetTitle("E_{beam}[GeV]")
ger_resolution_ratio.GetYaxis().SetTitle("MC/Data")
ger_lateralspread_ratio.GetXaxis().SetTitle("E_{beam}[GeV]")
ger_lateralspread_ratio.GetYaxis().SetTitle("MC/Data")
outputFile.cd()
# save
ger_response_ratio.Write()
ger_resolution_ratio.Write()
ger_lateralspread_ratio.Write()
# append to list
ResponseRatioList.append(ger_response_ratio)
ResolutionRatioList.append(ger_resolution_ratio)
LateralSpreadRatioList.append(ger_lateralspread_ratio)
# draw the single ratio
DrawSingleGraphErrorsOnCanvas("{}/{}/{}_{}_Response_Ratio".format(PlotDir, Particle, Particle, PhysicsList), ger_response_ratio,"AP", False, False, False, PhysicsList)
DrawSingleGraphErrorsOnCanvas("{}/{}/{}_{}_LateralSpread_Ratio".format(PlotDir, Particle, Particle, PhysicsList), ger_lateralspread_ratio,"AP", False, False, False, PhysicsList)
DrawSingleGraphErrorsOnCanvas("{}/{}/{}_{}_Resolution_Ratio".format(PlotDir, Particle, Particle, PhysicsList), ger_resolution_ratio,"AP", False, False, False, PhysicsList)
#------------------Longitudinal Profile----------------------------
# list of longitudinal profile of all types of particles and all beam energies and all physics lists
# N = N of types of particles * N of beam energies * N of physics lists
mclongitudinalprofilelist=[]
mcnormalizedlongitudinalprofilelist=[]
mclongitudinalprofileratiolist=[]
mcnormalizedlongitudinalprofileratiolist=[]
for Energy in Energies:
# skip 20 GeV for proton
if Particle=='pr' and Energy==20000: continue
mclongitudinalprofilelist.append(inputFile.Get("{}_{}GeV_{}_LongitudinalProfile".format(Particle, Energy/1000,PhysicsList)))
mcnormalizedlongitudinalprofilelist.append(inputFile.Get("{}_{}GeV_{}_NormalizedLongitudinalProfile".format(Particle, Energy/1000, PhysicsList)))
print mclongitudinalprofilelist, mcnormalizedlongitudinalprofilelist
# get the ratios of longitudinal profiles
for i in range(len(mclongitudinalprofilelist)):
longitudinalprofilelistratio = mclongitudinalprofilelist[i].Clone()
longitudinalprofilelistratio.Divide(datalongitudinalprofilelist[i])
longitudinalprofilelistratio.SetName(longitudinalprofilelistratio.GetName()+"_Ratio")
longitudinalprofilelistratio.GetYaxis().SetTitle("MC/Data")
longitudinalprofilelistratio.GetYaxis().SetRangeUser(0.65, 1.45)
longitudinalprofilelistratio.Write()
mclongitudinalprofileratiolist.append(longitudinalprofilelistratio)
normalizedlongitudinalprofilelistratio = mcnormalizedlongitudinalprofilelist[i].Clone()
normalizedlongitudinalprofilelistratio.Divide(datanormalizedlongitudinalprofilelist[i])
normalizedlongitudinalprofilelistratio.SetName(normalizedlongitudinalprofilelistratio.GetName()+"_Ratio")
normalizedlongitudinalprofilelistratio.GetYaxis().SetTitle("MC/Data")
normalizedlongitudinalprofilelistratio.GetYaxis().SetRangeUser(0.65, 1.45)
normalizedlongitudinalprofilelistratio.Write()
mcnormalizedlongitudinalprofileratiolist.append(normalizedlongitudinalprofilelistratio)
# draw single ratio of longitudinal profiles
if Particle=="pr":
DrawSingleHistOnCanvas("{}/{}/{}_{}_{}_LongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, Energies[i+1]/1000, PhysicsList),longitudinalprofilelistratio, "PE", False, False)
DrawSingleHistOnCanvas("{}/{}/{}_{}_{}_NormalizedLongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, Energies[i+1]/1000, PhysicsList),normalizedlongitudinalprofilelistratio, "PE", False, False)
elif Particle=="pi":
DrawSingleHistOnCanvas("{}/{}/{}_{}_{}_LongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, Energies[i]/1000, PhysicsList),longitudinalprofilelistratio, "PE", False, False)
DrawSingleHistOnCanvas("{}/{}/{}_{}_{}_NormalizedLongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, Energies[i]/1000, PhysicsList),normalizedlongitudinalprofilelistratio, "PE", False, False)
# append the ratio to list
mclongitudinalprofilelists.append(mclongitudinalprofilelist)
mcnormalizedlongitudinalprofilelists.append(mcnormalizedlongitudinalprofilelist)
longitudinalprofileratiolists.append(mclongitudinalprofileratiolist)
normalizedlongitudinalprofileratiolists.append(mcnormalizedlongitudinalprofileratiolist)
FullParticleName=""
# draw ratios of longitudinal profiles of all beam energies on same canvas
if Particle=='pi':
FullParticleName = "Pion"
DrawFourHistsOnCanvas("{}/{}/{}_{}_LongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, PhysicsList),mclongitudinalprofileratiolist[0],mclongitudinalprofileratiolist[1],mclongitudinalprofileratiolist[2],mclongitudinalprofileratiolist[3], "PE","pesame","pesame","pesame", False, False, False, FullParticleName, PhysicsList)
DrawFourHistsOnCanvas("{}/{}/{}_{}_NormalizedLongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, PhysicsList),mcnormalizedlongitudinalprofileratiolist[0],mcnormalizedlongitudinalprofileratiolist[1],mcnormalizedlongitudinalprofileratiolist[2],mcnormalizedlongitudinalprofileratiolist[3], "PE","pesame","pesame","pesame", False, False, False, FullParticleName,PhysicsList)
else:
FullParticleName = "Proton"
DrawThreeHistsOnCanvas("{}/{}/{}_{}_LongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, PhysicsList),mclongitudinalprofileratiolist[0],mclongitudinalprofileratiolist[1],mclongitudinalprofileratiolist[2], "PE","pesame","pesame", False, False, False, FullParticleName, PhysicsList)
DrawThreeHistsOnCanvas("{}/{}/{}_{}_NormalizedLongitudinalProfile_Ratio".format(PlotDir, Particle, Particle, PhysicsList),mcnormalizedlongitudinalprofileratiolist[0],mcnormalizedlongitudinalprofileratiolist[1],mcnormalizedlongitudinalprofileratiolist[2], "PE","pesame","pesame", False, False, False, FullParticleName, PhysicsList)
FullParticleName=""
if Particle=='pi':
FullParticleName = "Pion"
ger_dataresponse.SetTitle("Data")
ger_dataresolution.SetTitle("Data")
ger_datalateralspread.SetTitle("Data")
elif Particle=='pr':
FullParticleName = "Proton"
ger_dataresponse.GetXaxis().SetRangeUser(40, 190)
ger_dataresolution.GetXaxis().SetRangeUser(40, 190)
ger_datalateralspread.GetXaxis().SetRangeUser(40, 190)
ger_dataresponse.SetTitle("Data")
ger_dataresolution.SetTitle("Data")
ger_datalateralspread.SetTitle("Data")
for npr in range(len(ResponseList)):
ResponseList[npr].GetXaxis().SetRangeUser(40, 190)
ResolutionList[npr].GetXaxis().SetRangeUser(40, 190)
LateralSpreadList[npr].GetXaxis().SetRangeUser(40, 190)
ResponseRatioList[npr].GetXaxis().SetRangeUser(40, 190)
ResolutionRatioList[npr].GetXaxis().SetRangeUser(40, 190)
LateralSpreadRatioList[npr].GetXaxis().SetRangeUser(40, 190)
ResponseList[npr].RemovePoint(0)
ResolutionList[npr].RemovePoint(0)
LateralSpreadList[npr].RemovePoint(0)
# draw responses, resolutions and lateral spread of all physics lists on same canvas.
# draw responses, resolutions and lateral spread of all physics lists and data on top,
# and ratios of MC to data on bottom
DrawFourGraphErrorsOnCanvas("{}/{}/{}_Response_Ratio".format(PlotDir, Particle, Particle),ResponseRatioList[0], ResponseRatioList[1], ResponseRatioList[2],ResponseRatioList[3], "AP","AP","AP","AP", False, False, False, FullParticleName)
DrawTopFiveGraphErrorsAndBottomFourGraphErrorsOnCanvas("{}/{}/{}_TopResponseBottomRatio".format(PlotDir, Particle, Particle),ger_dataresponse, ResponseList[0], ResponseList[1], ResponseList[2],ResponseList[3], ResponseRatioList[0], ResponseRatioList[1], ResponseRatioList[2],ResponseRatioList[3], "AP","AP","AP","AP", "AP","AP","AP","AP", "AP", False, False, False, False, FullParticleName)
DrawFourGraphErrorsOnCanvas("{}/{}/{}_Resolution_Ratio".format(PlotDir, Particle, Particle),ResolutionRatioList[0], ResolutionRatioList[1], ResolutionRatioList[2],ResolutionRatioList[3],"AP","AP","AP","AP", False, False, False,FullParticleName)
DrawTopFiveGraphErrorsAndBottomFourGraphErrorsOnCanvas("{}/{}/{}_TopResolutionBottomRatio".format(PlotDir, Particle, Particle),ger_dataresolution, ResolutionList[0], ResolutionList[1], ResolutionList[2],ResolutionList[3], ResolutionRatioList[0], ResolutionRatioList[1], ResolutionRatioList[2],ResolutionRatioList[3], "AP","AP","AP","AP", "AP","AP","AP","AP", "AP", False, False, False, False, FullParticleName)
DrawFourGraphErrorsOnCanvas("{}/{}/{}_LateralSpread_Ratio".format(PlotDir, Particle, Particle),LateralSpreadRatioList[0], LateralSpreadRatioList[1], LateralSpreadRatioList[2],LateralSpreadRatioList[3],"AP","AP","AP","AP", False, False, False,FullParticleName)
DrawTopFiveGraphErrorsAndBottomFourGraphErrorsOnCanvas("{}/{}/{}_TopLateralSpreadBottomRatio".format(PlotDir, Particle, Particle),ger_datalateralspread, LateralSpreadList[0], LateralSpreadList[1], LateralSpreadList[2],LateralSpreadList[3], LateralSpreadRatioList[0], LateralSpreadRatioList[1], LateralSpreadRatioList[2],LateralSpreadRatioList[3], "AP","AP","AP","AP", "AP","AP","AP","AP", "AP", False, False, False, False, FullParticleName)
for i in range(len(Energies)):
if Particle=="pi":
datalongitudinalprofilelist[i].GetYaxis().SetRangeUser(5E-3, 100.)
if(Energies[i]==20000):
datalongitudinalprofilelist[i].GetYaxis().SetRangeUser(5E-3, 10.)
datalongitudinalprofilelist[i].SetTitle("Data")
mclongitudinalprofilelists[0][i].SetTitle(PhysicsLists[0])
mclongitudinalprofilelists[1][i].SetTitle(PhysicsLists[1])
mclongitudinalprofilelists[2][i].SetTitle(PhysicsLists[2])
mclongitudinalprofilelists[3][i].SetTitle(PhysicsLists[3])
longitudinalprofileratiolists[0][i].SetTitle(PhysicsLists[0])
longitudinalprofileratiolists[1][i].SetTitle(PhysicsLists[1])
longitudinalprofileratiolists[2][i].SetTitle(PhysicsLists[2])
longitudinalprofileratiolists[3][i].SetTitle(PhysicsLists[3])
datanormalizedlongitudinalprofilelist[i].GetYaxis().SetRangeUser(5E-5, 1.)
datanormalizedlongitudinalprofilelist[i].SetTitle("Data")
mcnormalizedlongitudinalprofilelists[0][i].SetTitle(PhysicsLists[0])
mcnormalizedlongitudinalprofilelists[1][i].SetTitle(PhysicsLists[1])
mcnormalizedlongitudinalprofilelists[2][i].SetTitle(PhysicsLists[2])
mcnormalizedlongitudinalprofilelists[3][i].SetTitle(PhysicsLists[3])
normalizedlongitudinalprofileratiolists[0][i].SetTitle(PhysicsLists[0])
normalizedlongitudinalprofileratiolists[1][i].SetTitle(PhysicsLists[1])
normalizedlongitudinalprofileratiolists[2][i].SetTitle(PhysicsLists[2])
normalizedlongitudinalprofileratiolists[3][i].SetTitle(PhysicsLists[3])
# draw profiles of all physics lists of each beam energy on same canvas
# draw profiles of all physics lists of each beam energy and data on top, ratios of MC to data on bottom
DrawFiveHistsOnCanvas("{}/{}/{}_LongitudinalProfileWithData_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datalongitudinalprofilelist[i],mclongitudinalprofilelists[0][i], mclongitudinalprofilelists[1][i],mclongitudinalprofilelists[2][i],mclongitudinalprofilelists[3][i], "PE", "PESame", "PESame", "PESame", "PESame", False, True, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFiveHistsOnCanvas("{}/{}/{}_NormalizedLongitudinalProfileWithData_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datanormalizedlongitudinalprofilelist[i],mcnormalizedlongitudinalprofilelists[0][i], mcnormalizedlongitudinalprofilelists[1][i],mcnormalizedlongitudinalprofilelists[2][i],mcnormalizedlongitudinalprofilelists[3][i], "PE", "PESame", "PESame", "PESame", "PESame", False, True, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFourHistsOnCanvas("{}/{}/{}_LongitudinalProfile_Ratio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),longitudinalprofileratiolists[0][i], longitudinalprofileratiolists[1][i],longitudinalprofileratiolists[2][i],longitudinalprofileratiolists[3][i], "PE", "PESame", "PESame", "PESame", False, False, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFourHistsOnCanvas("{}/{}/{}_NormalizedLongitudinalProfile_Ratio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),normalizedlongitudinalprofileratiolists[0][i], normalizedlongitudinalprofileratiolists[1][i],normalizedlongitudinalprofileratiolists[2][i],normalizedlongitudinalprofileratiolists[3][i], "PE", "PESame", "PESame", "PESame", False, False, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawTopFiveHistsAndBottomFourHistsOnCanvas("{}/{}/{}_TopLongitudinalProfileBottomRatio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datalongitudinalprofilelist[i], mclongitudinalprofilelists[0][i], mclongitudinalprofilelists[1][i],mclongitudinalprofilelists[2][i],mclongitudinalprofilelists[3][i], longitudinalprofileratiolists[0][i], longitudinalprofileratiolists[1][i],longitudinalprofileratiolists[2][i],longitudinalprofileratiolists[3][i], "PE", "PESame", "PESame", "PESame", "PESame", "PE", "PESame", "PESame", "PESame", False, True, False, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawTopFiveHistsAndBottomFourHistsOnCanvas("{}/{}/{}_TopNormalizedLongitudinalProfileBottomRatio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datanormalizedlongitudinalprofilelist[i], mcnormalizedlongitudinalprofilelists[0][i], mcnormalizedlongitudinalprofilelists[1][i],mcnormalizedlongitudinalprofilelists[2][i],mcnormalizedlongitudinalprofilelists[3][i], normalizedlongitudinalprofileratiolists[0][i], normalizedlongitudinalprofileratiolists[1][i],normalizedlongitudinalprofileratiolists[2][i],normalizedlongitudinalprofileratiolists[3][i], "PE", "PESame", "PESame", "PESame", "PESame", "PE", "PESame", "PESame", "PESame", False, True, False, False, "Pion", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
elif Particle=="pr":
# proton doesn't have beam energy of 20 GeV in data.
if Energies[i]==20000: continue
datalongitudinalprofilelist[i-1].GetYaxis().SetRangeUser(5E-3, 100.)
datalongitudinalprofilelist[i-1].SetTitle("Data")
mclongitudinalprofilelists[0][i-1].SetTitle(PhysicsLists[0])
mclongitudinalprofilelists[1][i-1].SetTitle(PhysicsLists[1])
mclongitudinalprofilelists[2][i-1].SetTitle(PhysicsLists[2])
mclongitudinalprofilelists[3][i-1].SetTitle(PhysicsLists[3])
longitudinalprofileratiolists[0][i-1].SetTitle(PhysicsLists[0])
longitudinalprofileratiolists[1][i-1].SetTitle(PhysicsLists[1])
longitudinalprofileratiolists[2][i-1].SetTitle(PhysicsLists[2])
longitudinalprofileratiolists[3][i-1].SetTitle(PhysicsLists[3])
datanormalizedlongitudinalprofilelist[i-1].GetYaxis().SetRangeUser(5E-5, 1.)
datanormalizedlongitudinalprofilelist[i-1].SetTitle("Data")
mcnormalizedlongitudinalprofilelists[0][i-1].SetTitle(PhysicsLists[0])
mcnormalizedlongitudinalprofilelists[1][i-1].SetTitle(PhysicsLists[1])
mcnormalizedlongitudinalprofilelists[2][i-1].SetTitle(PhysicsLists[2])
mcnormalizedlongitudinalprofilelists[3][i-1].SetTitle(PhysicsLists[3])
normalizedlongitudinalprofileratiolists[0][i-1].SetTitle(PhysicsLists[0])
normalizedlongitudinalprofileratiolists[1][i-1].SetTitle(PhysicsLists[1])
normalizedlongitudinalprofileratiolists[2][i-1].SetTitle(PhysicsLists[2])
normalizedlongitudinalprofileratiolists[3][i-1].SetTitle(PhysicsLists[3])
DrawFiveHistsOnCanvas("{}/{}/{}_LongitudinalProfileWithData_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datalongitudinalprofilelist[i-1],mclongitudinalprofilelists[0][i-1], mclongitudinalprofilelists[1][i-1],mclongitudinalprofilelists[2][i-1], mclongitudinalprofilelists[3][i-1], "PE", "PESame", "PESame", "PESame", "PESame", False, True, False, "Proton", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFiveHistsOnCanvas("{}/{}/{}_NormalizedLongitudinalProfileWithData_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datanormalizedlongitudinalprofilelist[i-1],mcnormalizedlongitudinalprofilelists[0][i-1], mcnormalizedlongitudinalprofilelists[1][i-1],mcnormalizedlongitudinalprofilelists[2][i-1], mcnormalizedlongitudinalprofilelists[3][i-1], "PE", "PESame", "PESame", "PESame", "PESame", False, True, False, "Proton", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFourHistsOnCanvas("{}/{}/{}_LongitudinalProfile_Ratio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),longitudinalprofileratiolists[0][i-1], longitudinalprofileratiolists[1][i-1],longitudinalprofileratiolists[2][i-1], longitudinalprofileratiolists[3][i-1], "PE", "PESame", "PESame", "PESame", False, False, False, "Proton","E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawFourHistsOnCanvas("{}/{}/{}_NormalizedLongitudinalProfile_Ratio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),normalizedlongitudinalprofileratiolists[0][i-1], normalizedlongitudinalprofileratiolists[1][i-1],normalizedlongitudinalprofileratiolists[2][i-1], normalizedlongitudinalprofileratiolists[3][i-1],"PE", "PESame", "PESame", "PESame", False, False, False, "Proton", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawTopFiveHistsAndBottomFourHistsOnCanvas("{}/{}/{}_TopLongitudinalProfileBottomRatio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datalongitudinalprofilelist[i-1], mclongitudinalprofilelists[0][i-1], mclongitudinalprofilelists[1][i-1],mclongitudinalprofilelists[2][i-1],mclongitudinalprofilelists[3][i-1], longitudinalprofileratiolists[0][i-1], longitudinalprofileratiolists[1][i-1],longitudinalprofileratiolists[2][i-1],longitudinalprofileratiolists[3][i-1], "PE", "PESame", "PESame", "PESame", "PESame", "PE", "PESame", "PESame", "PESame", False, True, False, False, "Proton", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
DrawTopFiveHistsAndBottomFourHistsOnCanvas("{}/{}/{}_TopNormalizedLongitudinalProfileBottomRatio_{}GeV".format(PlotDir, Particle, Particle, Energies[i]/1000),datanormalizedlongitudinalprofilelist[i-1], mcnormalizedlongitudinalprofilelists[0][i-1], mcnormalizedlongitudinalprofilelists[1][i-1],mcnormalizedlongitudinalprofilelists[2][i-1],mcnormalizedlongitudinalprofilelists[3][i-1], normalizedlongitudinalprofileratiolists[0][i-1], normalizedlongitudinalprofileratiolists[1][i-1],normalizedlongitudinalprofileratiolists[2][i-1],normalizedlongitudinalprofileratiolists[3][i-1], "PE", "PESame", "PESame", "PESame", "PESame", "PE", "PESame", "PESame", "PESame", False, True, False, False, "Proton", "E_{beam}="+"{}GeV".format(Energies[i]/1000))
# draw data and MC on the same canvas, no ratios
def ComparePhysicsList():
for Particle in Particles: # loop over particles
# mc input file containing grapherrors of responses, resolutions and lateral spreads and histograms of longitudinal profiles
inputFile = ROOT.TFile.Open('{}/{}/Properities_{}.root'.format(ResultDir, Particle, Particle))
# data input file containing grapherrors of responses, resolutions and lateral spreads and histograms of longitudinal profiles
inputFile2 = ROOT.TFile.Open('{}/data/data.root'.format(ResultDir))
if not inputFile:
print "File: ",inputFile.GetName()," doesn't exist!!"
continue
# list of grapherrors and responses, resolutions and lateral spreads of MC
ResponseList = []
ResolutionList = []
LateralSpreadList = []
# list of grapherrors and responses, resolutions and lateral spreads of data and MC
ResponseListWithData = []
ResolutionListWithData = []
LateralSpreadListWithData = []
# get data results
ger_dataresponse = inputFile2.Get("{}_Response".format(Particle))
ger_dataresponse.SetTitle("Data")
ger_dataresolution = inputFile2.Get("{}_Resolution".format(Particle))
ger_dataresolution.SetTitle("Data")
ger_datalateralspread = inputFile2.Get("{}_LateralSpread".format(Particle))
ger_datalateralspread.SetTitle("Data")
ResponseListWithData.append(ger_dataresponse)
ResolutionListWithData.append(ger_dataresolution)
LateralSpreadListWithData.append(ger_datalateralspread)
# loop over physics to get grapherrors and responses,
# resolutions and lateral spreads of MC
for PhysicsList in PhysicsLists:
ger_response = inputFile.Get("{}_{}_Response".format(Particle, PhysicsList))
ger_response.SetTitle(PhysicsList)
ger_resolution = inputFile.Get("{}_{}_Resolution".format(Particle, PhysicsList))
ger_resolution.SetTitle(PhysicsList)
ger_lateralspread = inputFile.Get("{}_{}_LateralSpread".format(Particle, PhysicsList))
ger_lateralspread.SetTitle(PhysicsList)
ResponseList.append(ger_response)
ResolutionList.append(ger_resolution)
LateralSpreadList.append(ger_lateralspread)
ResponseListWithData.append(ger_response)
ResolutionListWithData.append(ger_resolution)
LateralSpreadListWithData.append(ger_lateralspread)
print ResponseList,ResolutionList,LateralSpreadList
FullParticleName=""
if Particle=='pi':
FullParticleName = "Pion"
else:
FullParticleName = "Proton"
# draw results of proton of MC and data on same canvas
if len(ResponseList)==3:
DrawThreeGraphErrorsOnCanvas("{}/{}/{}_Response".format(PlotDir,Particle,Particle),ResponseList[0], ResponseList[1], ResponseList[2],"AP","AP","AP")
DrawThreeGraphErrorsOnCanvas("{}/{}/{}_Resolution".format(PlotDir,Particle,Particle),ResolutionList[0], ResolutionList[1], ResolutionList[2],"AP","AP","AP")
# draw results of pion of MC and data on same canvas
elif len(ResponseList)==4:
DrawFourGraphErrorsOnCanvas("{}/{}/{}_Response".format(PlotDir,Particle,Particle),ResponseList[0], ResponseList[1], ResponseList[2],ResponseList[3], "AP","AP","AP","AP", False, False, False,FullParticleName)
DrawFourGraphErrorsOnCanvas("{}/{}/{}_Resolution".format(PlotDir,Particle,Particle),ResolutionList[0], ResolutionList[1], ResolutionList[2],ResolutionList[3],"AP","AP","AP","AP", False, False, False, FullParticleName)
DrawFourGraphErrorsOnCanvas("{}/{}/{}_LateralSpread".format(PlotDir,Particle,Particle),LateralSpreadList[0], LateralSpreadList[1], LateralSpreadList[2],LateralSpreadList[3],"AP","AP","AP","AP", False, False, False, FullParticleName)
DrawFiveGraphErrorsOnCanvas("{}/{}/{}_ResponseWithData".format(PlotDir,Particle,Particle),ResponseListWithData[0], ResponseListWithData[1], ResponseListWithData[2],ResponseListWithData[3], ResponseListWithData[4], "AP","AP","AP","AP", "AP", False, False, False,FullParticleName)
DrawFiveGraphErrorsOnCanvas("{}/{}/{}_ResolutionWithData".format(PlotDir,Particle,Particle),ResolutionListWithData[0], ResolutionListWithData[1], ResolutionListWithData[2],ResolutionListWithData[3],ResolutionListWithData[4], "AP","AP","AP","AP", "Ap", False, False, False,FullParticleName)
DrawFiveGraphErrorsOnCanvas("{}/{}/{}_LateralSpreadWithData".format(PlotDir,Particle,Particle),LateralSpreadListWithData[0], LateralSpreadListWithData[1], LateralSpreadListWithData[2],LateralSpreadListWithData[3],LateralSpreadListWithData[4],"AP","AP","AP","AP","AP", False, False, False,FullParticleName)
if __name__ == '__main__':
GetPlotSingleProperty()
GetDataPlotSingleProperty()
ComDataMC()
ComparePhysicsList()
| [
"[email protected]"
] | |
4343b66f7f4aa4b421a9916100526490275a5e63 | e4de060c295fba0d0386d0a7678e744ced18b920 | /build/move_base_flex/mbf_costmap_core/catkin_generated/pkg.installspace.context.pc.py | d99db631ffe34a45fa7eeeca31b56d21fdb231f6 | [] | no_license | jbenzhhn/carla_hhn | af9497d01ce1f34ee0016ca660a0cc5af5f71be8 | abd803bcdd506641c8152ec994468518ea809f1b | refs/heads/master | 2023-04-05T10:50:28.934452 | 2021-04-07T14:31:41 | 2021-04-07T14:31:41 | 355,151,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;geometry_msgs;mbf_abstract_core;mbf_utility;tf;costmap_2d;nav_core".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mbf_costmap_core"
PROJECT_SPACE_DIR = "/home/automotive/catkin_ws/install"
PROJECT_VERSION = "0.3.4"
| [
"[email protected]"
] | |
011004d9088a8eae9ee9471922e947f0df13e0e9 | 1d7ae7f6e7a0df98d92f9ec5f277752d14924a94 | /fake-very-small-test/tmp/Environment_jq.py | 5e6d4b61381271a2d593cd1c8cc162dbd1a0feb7 | [] | no_license | lindsaymorgan/Mobike-Bike-Sharing-System-Dispatch-Optimization-Using-Reinforcement-Learning | 1e6b1aa3c64d2ff2e31b5d9dcc4abdc11e10679c | 6c8a329fae5c2ac8db45a3d8c55b308aae8ad804 | refs/heads/master | 2023-05-02T07:39:49.089459 | 2021-05-23T02:26:14 | 2021-05-23T02:26:14 | 279,467,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,717 | py | import numpy as np
from copy import deepcopy
import scipy.stats as stats
class State:
def __init__(self, state, region_count, car_num, out_nums, in_nums, capacity_each_step, reward=0, t=0, reward_sum=0, R=None):
self.region_count = region_count
self.car_num = car_num
self.state = state
self.out_nums = out_nums
self.in_nums = in_nums
self.capacity_each_step = capacity_each_step
self.reward = reward
self.reward_sum = reward_sum
self._R = R
self.t = t
self.__hash = None
self.feasible_actions = np.zeros((self.region_count, 2 * self.capacity_each_step + 1))
def get_hash(self):
if not self.__hash:
self.__hash = tuple(self.state).__hash__()
return self.__hash
def __repr__(self):
return str(tuple(self.state))
@property
def region_state(self):
return self.state[:self.region_count]
@region_state.setter
def region_state(self, value):
self._R = None
self.__hash = None
self.state[:self.region_count] = value
@property
def car_pos(self):
return self.state[self.region_count:self.region_count + self.car_num]
@car_pos.setter
def car_pos(self, value):
self.state[self.region_count:self.region_count + self.car_num] = value
@property
def bike_on_car(self):
return self.state[self.region_count + self.car_num:]
@bike_on_car.setter
def bike_on_car(self, value):
self.state[self.region_count + self.car_num:] = value
@property
def R(self) -> int:
"""
:return: Reward
"""
# if self._R:
# return self._R
# self.region_state += self.in_nums[self.t,]
# self.region_state -= self.out_nums[self.t+1 ,]
# raw_R = np.sum(self.region_state[self.region_state < 0])
# self.region_state += self.out_nums[self.t+1 ,]
# self.region_state -= self.in_nums[self.t]
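        # Sketch of the computation below: the bikes arriving this step (in_nums[t])
        # are added temporarily, each region is scored with the Poisson CDF of meeting
        # next step's expected demand (out_nums[t+1]), the scores are averaged over
        # regions, and region_state is restored by subtracting in_nums[t] again.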
self.region_state += self.in_nums[self.t,]
raw_R = np.mean(
[stats.poisson.cdf(i, j) for i, j in zip(self.region_state, self.out_nums[self.t + 1,])])
self.region_state -= self.in_nums[self.t]
self._R = raw_R
return raw_R
def out_stage(self):
"""
before move happens -- external bikes depart
"""
self.region_state -= self.out_nums[self.t,]
self.region_state[self.region_state < 0] = 0
return self.region_state
def in_stage(self):
"""
after move happens -- external bikes arrive
"""
self.region_state += self.in_nums[self.t,]
self.t += 1
def check_feasible(self, current_region, current_car, move) -> bool:
"""
Return True for feasible action, False for not feasible
:param state: State object, state to check
:param current_region: index of region
:param move: number of bikes to load/unload (must be within -capacity_each_step ~ capacity_each_step)
:param current_car: index of car
:return:
"""
# \ and (tmp_obs[-self.obs_dim + region] - self.out_nums[int(current_eps+1), region]) * move <= 0
        # move: a positive value moves bikes into the region, a negative value moves bikes out
        if move + self.region_state[current_region] >= 0 and move <= self.bike_on_car[current_car]:
            return True  # feasible action
        else:
            return False  # infeasible action
def update_feasible_action(self, current_car):
for region in range(self.region_count):
for move in range(-self.capacity_each_step, self.capacity_each_step + 1):
self.feasible_actions[region, move] = self.check_feasible(region, current_car, move)
def step(self, current_region, current_car, move, prev_state_R=None):
"""
Perform move action
:param current_region:
:param current_car:
:param move:
:param prev_state_R:
:return:
"""
        new_state = State(deepcopy(self.state), self.region_count,
                          self.car_num, self.out_nums, self.in_nums,
                          self.capacity_each_step, self.reward, self.t,
                          self.reward_sum, self.R)
# if (move > 0 or move + new_state.region_state[current_region] >= 0) and move <= new_state.bike_on_car[current_car]:
if move + new_state.region_state[current_region] >= 0 and move <= new_state.bike_on_car[current_car]:
new_state.region_state[current_region] += move
            # update the truck state
            new_state.bike_on_car[current_car] -= move  # update the number of bikes on the truck
            new_state.car_pos[current_car] = current_region  # update the truck position
new_state.reward = new_state.R
if prev_state_R:
new_state.reward -= prev_state_R
new_state.reward_sum += new_state.reward
return new_state
class Env(object):
def __init__(self, initial_region_state, capacity_each_step, max_episode, car_count, need):
"""
:param initial_region_state: List, number of bikes in each region, e.g. [15, 15, 15, 15]
:param capacity_each_step: maximum number of load/unload bikes each step (only one of load/unload per step)
:param max_episode: max time
:param car_count: number of cars
:param need: external change driven by customers
"""
self.initial_region_state = initial_region_state
self.region_count = len(initial_region_state)
self.capacity_each_step = capacity_each_step
self.car_num = car_count
# length of one-hot action vector: for each region, each car can load/unload maximum transport_capacity of bike
self.action_dim = self.region_count * (2 * self.capacity_each_step + 1)
# length of state: number of bike at each region + location of each car + number of bike on each car
self.obs_dim = self.region_count + 2 * self.car_num
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(max_episode + 1)}
self.out_nums = np.array([need.groupby('start_region')[str(i)].agg(np.sum) for i in range(max_episode + 1)])
self.in_nums = np.array([need.groupby('end_region')[str(i)].agg(np.sum) for i in range(max_episode + 1)])
# current episode
self.t = 0
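        # Construction sketch (hypothetical toy numbers; 'need' is assumed to be a
        # DataFrame with columns 'start_region', 'end_region' and '0'..'<max_episode>'):
        #   env = Env(initial_region_state=[15, 15, 15, 15], capacity_each_step=5,
        #             max_episode=12, car_count=2, need=need_df)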
def new_state(self):
"""
Initialize state
:return:
"""
state = State(np.asarray(self.initial_region_state + [0] * self.car_num * 2), self.region_count,
self.car_num, self.out_nums, self.in_nums, self.capacity_each_step)
return state
| [
"[email protected]"
] | |
3f9e557501df5b989853196d4359ca663f35ee37 | 266b3911034ffe37f6c1c88ae4061f5792676c8b | /scripts/irods/logging_infrastructure.py | d368b224ecfbbe73002eb15b15cd538eb3162081 | [
"BSD-3-Clause"
] | permissive | trel/irods | cca485264f4189cb9fc9ce63f204faf5ff9f1ff5 | dc462b0e90f3d715546329570f5950dd425dc489 | refs/heads/master | 2022-05-20T16:51:46.864969 | 2021-10-04T17:55:26 | 2021-10-04T17:59:34 | 73,592,300 | 1 | 0 | NOASSERTION | 2021-10-04T17:59:35 | 2016-11-13T03:03:35 | C++ | UTF-8 | Python | false | false | 1,536 | py | #! /usr/bin/python
from __future__ import print_function
import os
import sys
import platform
import subprocess
import shutil
import logging
from .log import register_tty_handler
def rsyslog_config_path():
return '/etc/rsyslog.d/00-irods.conf'
def logrotate_config_path():
return '/etc/logrotate.d/irods'
def setup_rsyslog_and_logrotate(register_tty=True):
l = logging.getLogger(__name__)
l.setLevel(logging.INFO)
if register_tty:
register_tty_handler(sys.stdout, logging.INFO, logging.WARNING)
# Copy rsyslog configuration file into place if it does not exist
# and restart the rsyslog daemon so that the configuration is loaded.
dst = rsyslog_config_path()
if not os.path.isfile(dst):
l.info('Configuring rsyslog ...')
shutil.copyfile('/var/lib/irods/packaging/irods.rsyslog', dst)
l.info('done.')
l.info('Restarting rsyslog ...')
if 'Ubuntu' == platform.linux_distribution()[0]:
subprocess.call(['service', 'rsyslog', 'restart'])
else:
subprocess.call(['systemctl', 'restart', 'rsyslog'])
l.info('done.')
else:
l.info('rsyslog already configured.')
# Copy logrotate configuration file into place if it does not exist.
dst = logrotate_config_path()
if not os.path.isfile(dst):
l.info('Configuring logrotate ...')
shutil.copyfile('/var/lib/irods/packaging/irods.logrotate', dst)
l.info('done.')
else:
l.info('logrotate already configured.')
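# Typical usage sketch (the exact import path depends on how these scripts are
# packaged, so it is only an assumption):
#   from irods import logging_infrastructure
#   logging_infrastructure.setup_rsyslog_and_logrotate(register_tty=False)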
| [
"[email protected]"
] | |
356f23dcc0f34092b262caed148b54b7583618e5 | ace7e98719c756cff4e4baf7c92e546cbc0b92ca | /LeetCode/firstMissingPositive.py | 37817e06877b8d07f503696fc1fe9d2f340a9bb4 | [] | no_license | armsky/OnlineJudge | f4159326c92a794695cca8a162280fef32f95a2a | c658b78c920aa94c25b3d932cd7e46c0df82b19a | refs/heads/master | 2020-04-15T01:21:18.158217 | 2015-12-11T03:05:28 | 2015-12-11T03:05:28 | 21,989,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | """
Given an unsorted integer array, find the first missing positive integer.
For example,
Given [1,2,0] return 3,
and [3,4,-1,1] return 2.
Your algorithm should run in O(n) time and use constant space.
"""
class Solution:
# @param A, a list of integers
# @return an integer
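    # Idea: repeatedly swap each value v with 1 <= v <= len(A) into slot v-1; after
    # that pass, the first index i with A[i] != i+1 gives the answer i+1
    # (O(n) time, O(1) extra space).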
def firstMissingPositive(self, A):
for i in range(len(A)):
while A[i] != i+1:
if A[i] <= 0 or A[i] > len(A) or A[i] == A[A[i]-1]:
break
else:
temp = A[A[i]-1]
A[A[i]-1] = A[i]
A[i] = temp
print A
for i in range(len(A)):
if A[i] != i+1:
return i+1
return len(A)+1
solution = Solution()
print solution.firstMissingPositive([1,2,0])
print solution.firstMissingPositive([3,4,0,2])
| [
"[email protected]"
] | |
f073c567c4891983543a7c56592a594bf7f068cc | 0e02b452a10c5adff4e988da912b385a3335aba8 | /Noun Phrase Frequencies Visualization/NPFreqSolrDash/nounphrase_visualization_yearly.py | 4e50843dc68e870f2f6700d4c61af016551c0c36 | [] | no_license | tf-dbis-uni-freiburg/arxiv-cs-analysis | 2006bd4c862ba84e137de801d37598f907a8c426 | 40180718c357ec9304e6047fdfe17fed2b22a530 | refs/heads/master | 2021-03-27T08:50:23.081860 | 2019-01-15T23:49:43 | 2019-01-15T23:49:43 | 106,251,614 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,623 | py | """ This module is used to visualize the yearly doc frequencies (no. of docs in which a phrase is present per year) and
phrase frequencies (no. of times a phrase is present per year) of noun phrase(s) chosen by the user in a Dash user interface.
A Solr query is made for the query/queries, results are aggregated yearly, and converted into percentage of phrases/docs in
the year by dividing by the total docs/phrases in each year (these are obtained from a json file built for that purpose in
the year by dividing by the total docs/phrases in each year (these are obtained from a json file built for that purpose in
another module). """
import requests
import sys
import pandas as pd
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
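# To try the dashboard locally (sketch; Dash serves on port 8050 by default):
#   python nounphrase_visualization_yearly.py
# then enter comma-separated phrases such as "neural network, deep learning" in the search box.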
def search_solr_parse_json(query, collection, search_field):
""" Searches the nounphrases collection on 'phrase' (query),
parses the json result and returns it as a list of dictionaries where
each dictionary corresponds to a record.
ARGUMENTS: query, string: the user's query entered in a search box
(if it is comma-separated, only one part of the query is sent
to this function).
collection: the Solr collection name (=nounphrases)
search_field: the Solr field which is queried (=phrase)
RETURNS: docs, list of dicts: the documents (records) returned by Solr
AFTER getting the JSON response and parsing it."""
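    # Example of the request built below, for a hypothetical query "neural network"
    # (assumes a local Solr instance exposing the 'nounphrases' collection):
    #   GET http://localhost:8983/solr/nounphrases/select?q="neural network"&rows=100000&df=phrase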
solr_url = 'http://localhost:8983/solr/' + collection + '/select'
# Exact search only
query = '"' + query + '"'
# for rows, pass an arbitrarily large number.
url_params = {'q': query, 'rows': 100000, 'df': search_field}
solr_response = requests.get(solr_url, params=url_params)
if solr_response.ok:
data = solr_response.json()
docs = data['response']['docs']
return docs
else:
print("Invalid response returned from Solr")
sys.exit(11)
def dataframe_from_solr_results(documents_list):
""" Takes a list of dictionaries (each dictionary is a record) obtained by parsing
the JSON results from Solr, converts it into a dataframe, and keeps only the 4
important columns (discards _version_ and id, and also phrase, keeps published_date,
num_occurrences and arxiv_identifier). Finally, it makes sure that the published_date
is the new index.
ARGUMENTS: documents_list, list of dicts: list of documents (records) returned
by Solr for one search query
RETURNS: docs_df, Pandas dataframe, the Solr results converted into a Pandas
dataframe with index=published_date, columns=arxiv_identifier, num_occurrences"""
docs_df = pd.DataFrame(documents_list)
# Remove phrase too, as all the rows will have the same value
# (the solr query field was phrase).
docs_df.drop(['_version_', 'id', 'phrase'], axis=1, inplace=True)
# Change the published_date column from Solr's string timestamp format to a pandas
# datetime object with just dates.
docs_df.published_date = pd.to_datetime(docs_df.published_date)
# Make sure the published_date is the index. Once it is the index, we don't
# really need the column any more.
docs_df.set_index('published_date', inplace=True, drop=True)
return docs_df
def calculate_aggregates_day_wise(docs_df):
""" Takes a Pandas data frame with index=published_date, cols: num_occurrences and
arxiv_identifier as input, calculates the no. of unique and total occurrences by
grouping by published_date and cacluating the count and sum on the column
num_occurrences. The aggregate results are suitably renamed and the published_date
index is reset so that it becomes a column in the output dataframe.
NOT USED CURRENTLY"""
agg_df = docs_df.groupby('published_date').num_occurrences.agg(['sum','count']).rename(
columns={'sum':'total_occurrences','count':'unique_occurrences'}).reset_index()
#agg_df.sort_values(by='total_occurrences', ascending=False)
return agg_df
def calculate_aggregates(docs_df):
""" Takes a Pandas data frame with index=published_date, cols: num_occurrences and
arxiv_identifier as input, calculates the no. of unique and total occurrences by
grouping by the year part of published_date, and then calculating the count
and sum based on the column num_occurrences. The aggregate results are suitably
renamed and 2 dataframes (unique counts and total counts) are returned.
ARGUMENTS: docs_df, Pandas dataframe with index=published_date,
columns=num_occurrences and arxiv_identifier
RETURNS: docs_df_total, a Pandas df grouped on published_date year, on
which 'sum' is applied on num_occurrences.
docs_df_unique, a Pandas df grouped on published_date year, on
which 'count' is applied on num_occurrences.
IMPORTANT: the returned dfs have sum and count in the same column called
num_occurrences, a new sum/count column is not created.
"""
# Drop arxiv_identifier, we want to group by the published_date index, and
    # aggregate on num_occurrences.
docs_df.drop('arxiv_identifier', axis=1, inplace=True)
# Dataframe 1 takes the sum of num_occurrences after grouping by year
docs_df_total = docs_df.groupby(pd.Grouper(freq='1Y')).sum()
# docs_df_total.index has day as well, we keep only year
# Change num_occurrences to int after replacing nan by 0
docs_df_total.num_occurrences = docs_df_total.num_occurrences.fillna(0).astype('int64')
# Dataframe 2 takes the count of num_occurrences after grouping by year
    # This is a yearly document frequency
docs_df_unique = docs_df.groupby(pd.Grouper(freq='1Y')).count()
# Change num_occurrences to int after replacing nan by 0
docs_df_unique.num_occurrences = docs_df_unique.num_occurrences.fillna(0).astype('int64')
return docs_df_total, docs_df_unique
def get_percentage_aggregates(docs_df_total, docs_df_unique):
""" This function takes 2 dataframes -- one has yearly phrase frequencies, the other has
yearly document frequencies -- and normalizes the values by dividing by total no. of phrases
in the corresponding years and total no. of documents in the corresponding year respectively,
and multiplies by 100 to get percentages .
ARGUMENTS: docs_df_total, a Pandas df grouped on published_date year, on
which 'sum' is applied on num_occurrences.
docs_df_unique, a Pandas df grouped on published_date year, on
which 'count' is applied on num_occurrences.
RETURNS: docs_df_total, the data frame in the arguments with an additional field 'percentage_occurrences'
calculated by dividing the current value for each year by the no. of phrases in that year
docs_df_unique, the data frame in the arguments with an additional field 'percentage_occurrences'
calculated by dividing the current value for each year by the no. of docs in that year
NOTE: The total no. of docs/phrases in each year is present in a json file phrases_and_docs_yearly.json """
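    # Hypothetical example of the structure assumed for phrases_and_docs_yearly.json
    # (the real counts come from the module that builds the file):
    #   [
    #     {"2007": 1250431, "2008": 1893210, ...},   # total phrases per year
    #     {"2007": 10234, "2008": 15321, ...}        # total documents per year
    #   ]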
# Read the Json file which has the yearly total phrases and documents -- 2 Json objects in a
# json array. Assign each object to a dictionary.
with open('phrases_and_docs_yearly.json', 'r') as file:
json_array= json.load(file)
# json_array is a list of 2 dicts.
yearly_phrases_total = json_array[0]
yearly_docs_total = json_array[1]
# For each of the dataframes, create a year column.This is a string and matches the value from the json file.
# Create year column as a period object with frequency = every year
docs_df_total['year'] = docs_df_total.index.to_period('Y')
# Convert the period object to a string
docs_df_total.year = docs_df_total.year.astype('str')
# Create a new column which uses the value in the year string column as a key in the yearly_phrases_total
# dict, and gets the corresponding value. The no. of occurrencesis divided by this number. The na_action is not
# strictly necessary, it is just a precaution which inserts NaN if a key (year) is not found. Finally, NaNs are
# produced if the dict value has a 0 (divide by 0). These NaNs are replaced by 0. * 100 because the final result is in %.
docs_df_total['percentage_occurrences'] = (100 * docs_df_total.num_occurrences / docs_df_total['year']
.map(yearly_phrases_total, na_action=None)).fillna(0)
# Repeat the process for docs_df_unique
docs_df_unique['year'] = docs_df_unique.index.to_period('Y')
# Convert the period object to a string
docs_df_unique.year = docs_df_unique.year.astype('str')
docs_df_unique['percentage_occurrences'] = (100 * docs_df_unique.num_occurrences / docs_df_unique['year']
.map(yearly_docs_total, na_action=None)).fillna(0)
return docs_df_total, docs_df_unique
def get_aggregated_data(query):
""" Function which returns an aggregated function for a valid query and
None for an invalid one.
ARGUMENTS: query, string, one of the parts of the user's comma-separated query
RETURNS: docs_df_total, a Pandas df grouped on published_date year, on
which 'sum' is applied on num_occurrences and then normalized to get a percentage.
docs_df_unique, a Pandas df grouped on published_date year, on
which 'count' is applied on num_occurrences and then normalized to get a percentage.
"""
# Get a list of dictinoaries by parsing the JSON results for the search query
docs = search_solr_parse_json(query, "nounphrases", "phrase")
if docs == []:
# No data found
return None, None
# Create a pandas dataframe out of the result
docs_df = dataframe_from_solr_results(docs)
# Group by published_date, and calculate sum and count of num_occurrences.
#These correspond to total_occurrences of a phrase for a date, and unique
# occurrences of a phrase for a date.
docs_df_total, docs_df_unique = calculate_aggregates(docs_df)
docs_df_total, docs_df_unique = get_percentage_aggregates(docs_df_total, docs_df_unique)
return docs_df_total, docs_df_unique
app = dash.Dash()
# Add the default Dash CSS, and some custom (very simple) CSS to remove the undo button
# app.css.append_css({'external_url': 'https://www.jsdelivr.com/package/npm/normalize.css'})
#app.css.append_css({'external_url': 'https://unpkg.com/sakura.css/css/sakura.css'})
app.css.append_css({'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
app.css.append_css({'external_url': 'https://rawgit.com/lwileczek/Dash/master/undo_redo5.css'})
# Black background, blue text
#colours = {
# 'background': '#111111',
# 'text': '#0080A5'
#}
# White background, blue text
colours = {
'background': '#ffffff',
'text': '#0080A5'
}
app.layout = html.Div(style={'backgroundColor': colours['background'],
'height':'100vh', 'width': '100%'},
children=[
html.H2(children='Distribution of Noun phrases over time',
style={
'textAlign': 'center',
'color': colours['text']
}
),
html.Label('Graph these comma-separated noun phrases: ',
style={
'textAlign': 'left',
'color': colours['text'],
'fontSize': '1.4em'
}),
dcc.Input(id='npinput1-state', value='', type='text'),
html.Button(id='submit-button', n_clicks=0, children='Submit'),
html.Div(id='output_total'),
html.Div(id='output_unique')
])
def not_found_message(notfound_list):
""" Takes a list of elements not found in the Solr index and produces
an error message for the whole lot of them together, along with suitable
styling (in an <h3> tag).
ARGUMENTS: notfound_list: list of user's search terms which are not found
in the Solr index
RETURNS: a html h5 message with a message listing the terms not found"""
notfound_list = ['"' + term.strip().capitalize() + '"'
for term in notfound_list]
notfound = ','.join(notfound_list)
return html.H5('Noun phrases not found: {}.'.format(notfound),
style={'color': colours['text']}
)
""" Trigger callback to show graph for total occurrences for all the comma-separated
# search terms when n_clicks of the button is incremented """
@app.callback(Output('output_total', 'children'),
[Input('submit-button', 'n_clicks')],
[State('npinput1-state', 'value')])
def show_graph_total(n_clicks, input_box):
""" Wrapped function which takes user input in a text box, returns a graph
if the query produces a hit in Solr, returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (total occurrences) of all terms which have results from
Solr, error messages of all terms which don't have results from Solr."""
# Store the layout with the appropriate title and y axis labels for the graph
layout_total = go.Layout(
title = 'Percentage of occurrences of chosen noun phrase(s) per Year',
xaxis = {'title': 'Publication year', 'tickformat': '%Y', 'tick0': '2007-12-31',
'dtick': 'M12', 'range': ['2007-07-01', '2018-07-01']},
yaxis = {'title': 'Percentage of phrase occurrences', 'ticksuffix': '%'},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text']
},
showlegend=True
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences total.
input_list = input_box.lower().split(',')
data_list_total = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
freq_df_total, freq_df_unique = get_aggregated_data(input_val.strip())
if freq_df_total is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences total goes on the y-axis.
data_list_total.append(go.Bar(
x = freq_df_total.index,
y = freq_df_total.percentage_occurrences,
text = input_val.strip().capitalize(),
opacity = 0.7,
name = input_val.strip().capitalize()
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_total == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
#graph_total_terms = {'data': data_list_total, 'layout': layout_total}
graph_total_terms = dict(data=data_list_total, layout=layout_total)
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
#return terms_not_found, html.Br(),
return terms_not_found, dcc.Graph(id='totalfreq', figure= graph_total_terms)
return html.Br(), dcc.Graph(id='totalfreq', figure= graph_total_terms)
""" Trigger callback to show graph for unique occurrences for all the comma-separated
# search terms when n_clicks of the button is incremented """
@app.callback(Output('output_unique', 'children'),
[Input('submit-button', 'n_clicks')],
[State('npinput1-state', 'value')])
def show_graph_unique(n_clicks, input_box):
""" Wrapped function which takes user input in a text box, returns a graph
if the query produces a hit in Solr.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (unique occurrences) of all terms which have results
from Solr """
# Store the layout with the appropriate title and y axis labels for the graph
layout_unique = go.Layout(
title = 'Percentage of papers containing chosen noun phrase(s) per Year',
xaxis = {'title': 'Publication year', 'tickformat': '%Y', 'tick0': '2007-12-31',
'dtick': 'M12', 'range': ['2007-07-01', '2018-07-01']},
yaxis = {'title': 'Percentage of papers with noun phrase', 'ticksuffix': '%'},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text']
},
showlegend=True
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences unique.
input_list = input_box.lower().split(',')
data_list_unique = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
freq_df_total, freq_df_unique = get_aggregated_data(input_val.strip())
if freq_df_unique is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences (unique) goes on the y-axis.
data_list_unique.append(go.Bar(
x = freq_df_unique.index,
y = freq_df_unique.percentage_occurrences,
text = input_val.strip().capitalize(),
opacity = 0.7,
name = input_val.strip().capitalize()
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_unique == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
return html.Br()
# One or more of the Solr queries returned a result
else:
graph_unique_terms = {'data': data_list_unique, 'layout': layout_unique}
if notfound_list != []:
return dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
return html.Br(), dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
def show_graph_total_not_callback(n_clicks, input_box):
""" Function which is called by a wrapped function in another module. It takes
user input in a text box, returns a graph if the query produces a hit in Solr.
Returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (total occurrences) of all terms which have results from
Solr, error messages of all terms which don't have results from Solr."""
# Store the layout with the appropriate title and y axis labels for the graph
layout_total = go.Layout(
title = 'Percentage of occurrences of chosen noun phrase(s) per Year',
xaxis = {'title': 'Publication year', 'tickformat': '%Y', 'tick0': '2007-12-31',
'dtick': 'M12', 'range': ['2007-07-01', '2018-07-01']},
yaxis = {'title': 'Percentage of phrase occurrences', 'ticksuffix': '%'},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text']
},
showlegend=True
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences total.
input_list = input_box.lower().split(',')
data_list_total = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
freq_df_total, freq_df_unique = get_aggregated_data(input_val.strip())
if freq_df_total is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences total goes on the y-axis.
data_list_total.append(go.Bar(
x = freq_df_total.index,
y = freq_df_total.percentage_occurrences,
text = input_val.strip().capitalize(),
opacity = 0.7,
name = input_val.strip().capitalize()
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_total == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
#graph_total_terms = {'data': data_list_total, 'layout': layout_total}
graph_total_terms = dict(data=data_list_total, layout=layout_total)
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
#return terms_not_found, html.Br(),
return terms_not_found, dcc.Graph(id='totalfreq', figure= graph_total_terms)
return html.Br(), dcc.Graph(id='totalfreq', figure= graph_total_terms)
def show_graph_unique_not_callback(n_clicks, input_box):
""" Function which is called by a wrapped function in another module. It takes
user input in a text box, returns a graph if the query produces a hit in Solr.
Returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (unique occurrences) of all terms which have results
from Solr """
# Store the layout with the appropriate title and y axis labels for the graph
layout_unique = go.Layout(
title = 'Percentage of papers containing chosen noun phrase(s) per Year',
xaxis = {'title': 'Publication year', 'tickformat': '%Y', 'tick0': '2007-12-31',
'dtick': 'M12', 'range': ['2007-07-01', '2018-07-01'], 'titlefont': {'size': 20}, 'tickfont': {'size': 18}},
yaxis = {'title': 'Percentage of papers with noun phrase', 'ticksuffix': '%', 'titlefont': {'size': 20}, 'tickfont': {'size': 18}},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text'],
'size': 15
},
showlegend=True,
legend = {'font': {'size': 20}}
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences unique.
input_list = input_box.lower().split(',')
data_list_unique = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
freq_df_total, freq_df_unique = get_aggregated_data(input_val.strip())
if freq_df_unique is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences (unique) goes on the y-axis.
data_list_unique.append(go.Bar(
x = freq_df_unique.index,
y = freq_df_unique.percentage_occurrences,
text = input_val.strip().capitalize(),
opacity = 0.7,
name = input_val.strip().capitalize()
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_unique == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
graph_unique_terms = {'data': data_list_unique, 'layout': layout_unique}
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
return terms_not_found, dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
return html.Br(), dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
if __name__ == '__main__':
app.run_server(host='0.0.0.0') | [
"[email protected]"
] | |
ca9dd99a19a954466f615b47a1b8dbff97ea320d | bc7ae358f8932d2bc5358c70f6c1edd92905aeb9 | /transparent test.py | 2cb32e0157490b5231ac64cbc8bc1f60fe954ddd | [] | no_license | Aaron-Hsiao1/Python_Sylvan | 415b383bebff619062ee6ef70339ec6a1fe43965 | b293e36caf215be7721cf869b494d0fd4860e5b2 | refs/heads/master | 2023-07-19T19:04:55.211094 | 2021-08-21T19:34:03 | 2021-08-21T19:34:03 | 323,982,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import pygame
pygame.init()
X = 570
Y = 900
screen_width = 1000
screen_height = 1000
Width = 30
Height = 30
Speed = 8
looping = True
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("blank")
while looping:
pygame.time.delay(5)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # leave the main loop before shutting pygame down so the drawing
            # calls below are not reached after pygame.quit()
            looping = False
    if not looping:
        pygame.quit()
        break
# def blit_alpha(target, source, opacity):
# x = 100
# y = 100
# temp = pygame.Surface((source.get_width(), source.get_height())).convert()
# temp.blit(target, (-x, -y))
# temp.blit(source, (100, 100))
# temp.set_alpha(50)
# target.blit(temp, (l00,100))
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
Y -= Speed
if keys[pygame.K_DOWN]:
Y += Speed
if keys[pygame.K_LEFT]:
X -= Speed
if keys[pygame.K_RIGHT]:
X += Speed
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0,255,0), (X, Y, Width, Height))
s = pygame.Surface((500,500)) # the size of your rect
s.set_alpha(150) # alpha level
s.fill((255,255,255)) # this fills the entire surface
screen.blit(s, (250,250)) # (0,0) are the top-left coordinates
pygame.display.update() | [
"[email protected]"
] | |
8ffaa6d47074616ebc145529b8fae389dbe8f338 | fb55adfc901176c1bae6914b51c0eedc7eab44a3 | /tasks.py | 79acd1db41870a008b6336a3f9a68773cdc68847 | [
"MIT"
] | permissive | yijiangh/coop_assembly | b82ab7b17b956ff33beafe329a48c083cfb7f940 | 71108b0639323cf3d996d63b0f702d45f4d60d67 | refs/heads/master | 2023-04-03T07:36:36.444159 | 2020-02-05T16:40:08 | 2020-02-05T16:40:08 | 228,839,363 | 8 | 0 | MIT | 2019-12-18T12:51:08 | 2019-12-18T12:51:06 | null | UTF-8 | Python | false | false | 6,917 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import codecs
import contextlib
import glob
import os
import sys
from shutil import rmtree
from xml.dom.minidom import parse
from invoke import Collection, Exit, task
# For automatic doc deployment
# from paramiko import SSHClient
# from paramiko.client import AutoAddPolicy
# from scp import SCPClient
try:
input = raw_input
except NameError:
pass
BASE_FOLDER = os.path.dirname(__file__)
PACKAGE_NAME = 'coop_assembly'
class Log(object):
def __init__(self, out=sys.stdout, err=sys.stderr):
self.out = out
self.err = err
def flush(self):
self.out.flush()
self.err.flush()
def write(self, message):
self.flush()
self.out.write(message + '\n')
self.out.flush()
def info(self, message):
self.write('[INFO] %s' % message)
def warn(self, message):
self.write('[WARN] %s' % message)
log = Log()
def confirm(question):
while True:
response = input(question).lower().strip()
if not response or response in ('n', 'no'):
return False
if response in ('y', 'yes'):
return True
print('Focus! It is either (y)es or (n)o', file=sys.stderr)
@task(default=True)
def help(ctx):
"""Lists available tasks and usage."""
ctx.run('invoke --list')
log.write('Use "invoke -h <taskname>" to get detailed help for a task.')
@task(help={
'docs': 'True to generate documentation, otherwise False',
'bytecode': 'True to clean up compiled python files, otherwise False.',
'builds': 'True to clean up build/packaging artifacts, otherwise False.'})
def clean(ctx, docs=True, bytecode=True, builds=True):
"""Cleans the local copy from compiled artifacts."""
with chdir(BASE_FOLDER):
if builds:
ctx.run('python setup.py clean')
if bytecode:
for root, dirs, files in os.walk(BASE_FOLDER):
for f in files:
if f.endswith('.pyc'):
os.remove(os.path.join(root, f))
if '.git' in dirs:
dirs.remove('.git')
folders = []
if docs:
folders.append('docs/_build/')
folders.append('dist/')
if bytecode:
folders.append('src/{}/__pycache__'.format(PACKAGE_NAME))
if builds:
folders.append('build/')
folders.append('src/{}.egg-info/'.format(PACKAGE_NAME))
for folder in folders:
rmtree(os.path.join(BASE_FOLDER, folder), ignore_errors=True)
@task(help={
'rebuild': 'True to clean all previously built docs before starting, otherwise False.',
'doctest': 'True to run doctest snippets, otherwise False.',
# 'check_links': 'True to check all web links in docs for validity, otherwise False.'
})
def docs(ctx, rebuild=False, doctest=False): #, check_links=False):
"""Builds package's HTML documentation."""
with chdir(BASE_FOLDER):
if rebuild:
clean(ctx)
if doctest:
ctx.run('sphinx-build -b doctest docs dist/docs/{}'.format(PACKAGE_NAME))
ctx.run('sphinx-build -b html docs dist/docs/{}'.format(PACKAGE_NAME))
# if check_links:
# ctx.run('sphinx-build -b linkcheck -c docs . dist/docs/{}'.format(PACKAGE_NAME))
# @task()
# def deploy_docs(ctx, scp_server='darch.ethz.ch'):
# """Deploy docs to the documentation server.
#
# Published to: xxx address"""
#
# DOCS_PATH = os.path.join(BASE_FOLDER, 'dist', 'docs', PACKAGE_NAME)
# with chdir(DOCS_PATH):
# scp_username = os.environ.get('SCP_USERNAME')
# scp_password = os.environ.get('SCP_PASSWORD')
#
# print('Connecting to {} as {}...'.format(scp_server, scp_username))
#
# with SSHClient() as ssh:
# ssh.set_missing_host_key_policy(AutoAddPolicy)
# ssh.connect(scp_server, username=scp_username, password=scp_password)
#
# scp = SCPClient(ssh.get_transport())
# scp.put(DOCS_PATH, recursive=True, remote_path='htdocs')
#
# print('Done')
@task()
def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
with chdir(BASE_FOLDER):
log.write('Checking ReStructuredText formatting...')
ctx.run('python setup.py check --strict --metadata --restructuredtext')
# log.write('Running flake8 python linter...')
# ctx.run('flake8 src setup.py')
# log.write('Checking python imports...')
# ctx.run('isort --check-only --diff --recursive src tests setup.py')
# log.write('Checking MANIFEST.in...')
# ctx.run('check-manifest')
@task(help={
'checks': 'True to run all checks before testing, otherwise False.',
'build': 'test build, default to false',
})
def test(ctx, checks=True, build=False):
"""Run all tests."""
with chdir(BASE_FOLDER):
if checks:
check(ctx)
if build:
log.write('Checking build')
ctx.run('python setup.py clean --all sdist bdist_wheel') #bdist_wheel
if sys.platform == 'win32':
ctx.run('powershell -Command "& pip install --verbose $(ls dist/*.tar.gz | % {$_.FullName})"')
else:
ctx.run('pip install --verbose dist/*.tar.gz')
log.write('Running pytest')
ctx.run('pytest --doctest-modules --cov={} tests --cov-report term-missing'.format(PACKAGE_NAME))
@task(help={
'release_type': 'Type of release follows semver rules. Must be one of: major, minor, patch.',
'bump_version': 'Bumpversion, true or false, default to false'})
def release(ctx, release_type, bump_version=False):
"""Releases the project in one swift command!"""
if release_type not in ('patch', 'minor', 'major'):
raise Exit('The release type parameter is invalid.\nMust be one of: major, minor, patch')
with chdir(BASE_FOLDER):
if bump_version:
ctx.run('bumpversion %s --verbose' % release_type)
ctx.run('invoke docs test')
ctx.run('python setup.py clean --all sdist bdist_wheel')
if confirm('You are about to upload the release to pypi.org. Are you sure? [y/N]'):
files = ['dist/*.whl', 'dist/*.gz', 'dist/*.zip']
dist_files = ' '.join([pattern for f in files for pattern in glob.glob(f)])
if len(dist_files):
ctx.run('twine upload --skip-existing %s' % dist_files)
else:
raise Exit('No files found to release')
else:
raise Exit('Aborted release')
@contextlib.contextmanager
def chdir(dirname=None):
current_dir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(current_dir)
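# Example invocations (sketch, assuming the 'invoke' CLI is installed):
#   invoke docs --rebuild
#   invoke test
#   invoke release patch --bump-version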
| [
"[email protected]"
] | |
fb3e5fb2d325fe624ac277cba857887429060102 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_storage_blob/v2019_12_12/_shared/authentication.py | b11dc57578087edc183689b24d68b515df7a6a00 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 5,199 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import sys
try:
from urllib.parse import urlparse, unquote
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import unquote # type: ignore
try:
from yarl import URL
except ImportError:
pass
try:
from azure.core.pipeline.transport import AioHttpTransport
except ImportError:
AioHttpTransport = None
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from . import sign_string
logger = logging.getLogger(__name__)
# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
msg = ""
if ex.args:
msg = ex.args[0]
if sys.version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
return desired_type(msg)
# There isn't a good solution in 2 for keeping the stack trace
# in general, or that will not result in an error in 3
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
class AzureSigningError(ClientAuthenticationError):
"""
Represents a fatal error when attempting to sign a request.
In general, the cause of this exception is user error. For example, the given account key is not valid.
Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
"""
# pylint: disable=no-self-use
class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
def __init__(self, account_name, account_key):
self.account_name = account_name
self.account_key = account_key
super(SharedKeyCredentialPolicy, self).__init__()
@staticmethod
def _get_headers(request, headers_to_sign):
headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
if 'content-length' in headers and headers['content-length'] == '0':
del headers['content-length']
return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
@staticmethod
def _get_verb(request):
return request.http_request.method + '\n'
def _get_canonicalized_resource(self, request):
uri_path = urlparse(request.http_request.url).path
try:
if isinstance(request.context.transport, AioHttpTransport) or \
isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
uri_path = URL(uri_path)
return '/' + self.account_name + str(uri_path)
except TypeError:
pass
return '/' + self.account_name + uri_path
@staticmethod
def _get_canonicalized_headers(request):
string_to_sign = ''
x_ms_headers = []
for name, value in request.http_request.headers.items():
if name.startswith('x-ms-'):
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value is not None:
string_to_sign += ''.join([name, ':', value, '\n'])
return string_to_sign
@staticmethod
def _get_canonicalized_resource_query(request):
sorted_queries = list(request.http_request.query.items())
sorted_queries.sort()
string_to_sign = ''
for name, value in sorted_queries:
if value is not None:
string_to_sign += '\n' + name.lower() + ':' + unquote(value)
return string_to_sign
def _add_authorization_header(self, request, string_to_sign):
try:
signature = sign_string(self.account_key, string_to_sign)
auth_string = 'SharedKey ' + self.account_name + ':' + signature
request.http_request.headers['Authorization'] = auth_string
except Exception as ex:
# Wrap any error that occurred as signing error
# Doing so will clarify/locate the source of problem
raise _wrap_exception(ex, AzureSigningError)
def on_request(self, request):
string_to_sign = \
self._get_verb(request) + \
self._get_headers(
request,
[
'content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since',
'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
]
) + \
self._get_canonicalized_headers(request) + \
self._get_canonicalized_resource(request) + \
self._get_canonicalized_resource_query(request)
self._add_authorization_header(request, string_to_sign)
#logger.debug("String_to_sign=%s", string_to_sign)
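# Minimal usage sketch (illustrative account name and key, not real credentials):
#   policy = SharedKeyCredentialPolicy("myaccount", "bXlrZXk=")
# The policy is then added to a storage client's HTTP pipeline so that on_request()
# signs every outgoing request with the account's shared key.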
| [
"[email protected]"
] | |
02356b4c95f1fe3d5b2b19948af77d16597c26f6 | dffb9a8855adecc4bd4d21b9168a4e9bdc75e508 | /arrandmatrix/q9.py | e71297c66dbcfdb2e98b4e05b14779bcc6c7eb44 | [
"MIT"
] | permissive | ResolveWang/algorithm_qa | 95278c4459e38c55225304210770efb61d934fcc | a0cb649acaf8cf9d808272bc15f1951f2c05c828 | refs/heads/master | 2021-07-17T09:32:01.845815 | 2018-08-13T13:54:42 | 2018-08-13T13:54:42 | 100,107,109 | 90 | 29 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | """
Problem: given a sorted array arr and an integer k, print, without repetition, every
non-descending pair in arr whose sum equals k.
For example, arr=[-8, -4, -3, 0, 1, 2, 4, 5, 8, 9], k=10, the output is:
1,9
2,8
Follow-up problem:
Given a sorted array arr and an integer k, print, without repetition, every
non-descending triple in arr whose sum equals k.
For example, arr=[-8, -4, -3, 0, 1, 2, 4, 5, 8, 9], k=10, the output is:
-4, 5, 9
-3, 4, 9
-3, 5, 8
0, 1, 9
0, 2, 8
1, 4, 5
"""
class KnumOfSum:
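    # Approach: two pointers moving inward over the sorted array for the pairs (O(n));
    # for the triples, fix the smallest element and reuse the pair routine on the
    # suffix to the right of the fixed element.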
@classmethod
def get_two_tuple_of_sum(cls, arr, k, print_value=False):
if not arr or len(arr) == 1:
return
left = 0
right = len(arr) - 1
res = []
while left < right:
left_value = arr[left]
right_value = arr[right]
            if left_value + right_value == k:
                # only record the pair when this left value is seen for the first
                # time, but always advance both pointers to avoid an infinite loop
                if left == 0 or arr[left-1] != arr[left]:
                    if print_value:
                        print(left_value, right_value)
                    res.append((left_value, right_value))
                left += 1
                right -= 1
elif left_value + right_value < k:
left += 1
else:
right -= 1
return res
@classmethod
def get_three_tuple_of_sum(cls, arr, k):
if not arr or len(arr) < 3:
return
for i in range(len(arr)):
new_k = k - arr[i]
if i > 0 and arr[i] == arr[i-1]:
continue
else:
res = cls.get_two_tuple_of_sum(arr[i+1:], new_k)
if res:
for x, y in res:
print(arr[i], x, y)
if __name__ == '__main__':
my_arr = [-8, -4, -3, 0, 1, 2, 2, 4, 5, 8, 9]
KnumOfSum.get_two_tuple_of_sum(my_arr, 10, True)
KnumOfSum.get_three_tuple_of_sum(my_arr, 10) | [
"[email protected]"
] | |
055160178fb7c1930792888ea5adaa86078353fc | 6ecf40c771874f31fa19f9534677c95c731352e9 | /SnapLimitReconstructor_Old.py | a392ca94928b1415a51d86e123387a049c797a00 | [] | no_license | fstakem/OptNetFilt | 1c86e82629352ee8ee9eb270aa3be01c6202696d | c862f3d1dd09ad9cdd146871761cb1f814e82de9 | refs/heads/master | 2016-09-11T03:06:06.466206 | 2014-06-07T02:03:04 | 2014-06-07T02:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,503 | py | # .---------------------------------------------------------------------------.
# | |
# | S N A P L I M I T R E C O N S T R U C T O R |
# | |
# '---------------------------------------------------------------------------'
import pdb
import inspect
from copy import *
from enum import Enum
from Globals import *
from Vector import Vector
from Sample import Sample
from PredictionSample import PredictionSample
class SnapLimitReconstructor(object):
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# P U B L I C F U N C T I O N S
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def __init__(self):
# Data
self.rawSignal = []
self.reconstructedSignal = []
# Algorithm parameters
self.samplingInterval = 10
self.interpolationType = InterpolationType.Time
self.threshold = 60
self.heartbeatRate = 500
self.snapLimit = 0.5
def getReconstructedSignal(self, rawSignal=[], samplingInterval=10,
interpolationType=InterpolationType.Time,
threshold=60, heartbeatRate=500, snapLimit=0.5):
if isinstance( rawSignal, list ):
self.rawSignal = rawSignal
if isinstance( samplingInterval, int ) and samplingInterval > 0:
self.samplingInterval = samplingInterval
if isinstance( interpolationType, Enum ):
self.interpolationType = interpolationType
if (isinstance( threshold, float ) and threshold > 0) or \
(isinstance(threshold, int ) and threshold > 0):
self.threshold = threshold
if isinstance( heartbeatRate, int ) and heartbeatRate > 0:
self.heartbeatRate = heartbeatRate
if isinstance( snapLimit, float ) and snapLimit > 0:
self.snapLimit = snapLimit
self.pullDataFromPackets()
self.executeAlgorithm()
return self.reconstructedSignal
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# P R I V A T E F U N C T I O N S
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def pullDataFromPackets(self):
temp = []
for packet in self.rawSignal:
temp.append(packet.predictionSample)
self.rawSignal = temp
def executeAlgorithm(self):
self.reconstructedSignal = []
self.reconstructedSignal.append( self.findFirstSample() )
reconstructionTime = self.reconstructedSignal[0].time + self.samplingInterval
interpolationSample = PredictionSample(self.reconstructedSignal[0],
self.rawSignal[0].velocity)
targetSample = None
for index, predictionSample in enumerate(self.rawSignal[1:]):
currentTime = predictionSample.sample.time
if currentTime < reconstructionTime:
targetSample = None
interpolationSample = predictionSample
elif currentTime == reconstructionTime:
estimatedSample = self.estimateSample(interpolationSample,
reconstructionTime)
targetSample = self.findTarget(predictionSample)
interpolationSample = self.findSnapSample(predictionSample, estimatedSample, targetSample)
self.reconstructedSignal.append(deepcopy(interpolationSample.sample))
reconstructionTime += self.samplingInterval
elif currentTime > reconstructionTime:
while currentTime > reconstructionTime:
if targetSample != None and reconstructionTime >= targetSample.sample.time:
interpolationSample = targetSample
targetSample = None
estimatedSample = self.estimateSample(interpolationSample,
reconstructionTime)
self.reconstructedSignal.append(deepcopy(estimatedSample.sample))
reconstructionTime += self.samplingInterval
if currentTime < reconstructionTime:
targetSample = None
interpolationSample = predictionSample
elif currentTime == reconstructionTime:
estimatedSample = self.estimateSample(interpolationSample,
reconstructionTime)
targetSample = self.findTarget(predictionSample)
interpolationSample = self.findSnapSample(predictionSample, estimatedSample, targetSample)
self.reconstructedSignal.append(deepcopy(interpolationSample.sample))
reconstructionTime += self.samplingInterval
def findFirstSample(self):
timeDiff = self.rawSignal[0].sample.time % self.samplingInterval
if timeDiff == 0:
return deepcopy(self.rawSignal[0].sample)
else:
change = self.samplingInterval - timeDiff
newSample = Sample()
newSample.time = self.rawSignal[0].sample.time + change
newSample.position = deepcopy(self.rawSignal[0].sample.position)
return newSample
def findTarget(self, predictionSample):
        if self.interpolationType == InterpolationType.Time:
            return self.findTargetForTimeThreshold(predictionSample)
        elif self.interpolationType == InterpolationType.Distance:
            return self.findTargetForDistanceThreshold(predictionSample)
def findTargetForTimeThreshold(self, predictionSample):
time = min(self.threshold, self.heartbeatRate)
targetSample = self.estimateSample(predictionSample, predictionSample.sample.time + time)
return targetSample
def findTargetForDistanceThreshold(self, predictionSample):
        distance = 0
        targetSample = None
        time = predictionSample.sample.time
        timeDiff = 0
        while distance < self.threshold and timeDiff < self.heartbeatRate:
            time += self.samplingInterval
            timeDiff = time - predictionSample.sample.time
            targetSample = self.estimateSample(predictionSample, time)
            distance = predictionSample.sample.position.distance(targetSample.sample.position)
        return targetSample
def findInterpolationSample(self, currentSample, targetSample):
deltaPosition = targetSample.sample.position - \
currentSample.sample.position
deltaTime = targetSample.sample.time - \
currentSample.sample.time
invDeltaTimeVector = Vector( 1 / float(deltaTime), \
1 / float(deltaTime), \
1 / float(deltaTime))
velocity = deltaPosition * invDeltaTimeVector
interpolationSample = PredictionSample()
interpolationSample.sample = deepcopy(currentSample.sample)
interpolationSample.velocity = velocity
return interpolationSample
def findSnapSample(self, currentSample, estimatedSample, targetSample):
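        # Snap a fraction (snapLimit) of the gap toward the target position immediately,
        # then choose a velocity that covers the remaining gap by the target sample's time.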
deltaPosition = targetSample.sample.position - \
currentSample.sample.position
deltaPosition.x *= self.snapLimit
deltaPosition.y *= self.snapLimit
deltaPosition.z *= self.snapLimit
snapPosition = currentSample.sample.position + deltaPosition
deltaPosition = targetSample.sample.position - snapPosition
deltaTime = targetSample.sample.time - \
currentSample.sample.time
invDeltaTimeVector = Vector( 1 / float(deltaTime), \
1 / float(deltaTime), \
1 / float(deltaTime))
velocity = deltaPosition * invDeltaTimeVector
snapSample = PredictionSample()
snapSample.sample = Sample(currentSample.sample.time, snapPosition)
snapSample.velocity = velocity
return snapSample
def estimateSample(self, interpolationSample, time):
estimatedSample = PredictionSample()
estimatedSample.sample.time = time
estimatedSample.sample.position = self.calculatePosition(interpolationSample, time)
estimatedSample.velocity = deepcopy(interpolationSample.velocity)
return estimatedSample
def calculatePosition(self, interpolationSample, time):
deltaTime = time - interpolationSample.sample.time
if deltaTime < 0:
print "Error at: " + str(interpolationSample.sample.time) + " " + str(time)
return deepcopy(interpolationSample.sample.position)
elif deltaTime == 0:
return deepcopy(interpolationSample.sample.position)
else:
deltaTimeVector = Vector(deltaTime, deltaTime, deltaTime)
deltaPosition = interpolationSample.velocity * deltaTimeVector
estimatedPosition = interpolationSample.sample.position + deltaPosition
return estimatedPosition
| [
"[email protected]"
] | |
c533a3df7e8cc711386467ffd3b763470cc473f5 | 41e3065d6f29449251f1cc79cb340fa273ac5c61 | /0x07-python-test_driven_development/2-matrix_divided.py | 863f0dc62cc8b16f472da3460c0908a01aaa3834 | [] | no_license | BD20171998/holbertonschool-higher_level_programming | 856fa3a7fcfafd3e17ebd7dd4cf9d3e5a609fd1f | bfa78d25bd4527e06cf1bf54cbc00722449d9a30 | refs/heads/master | 2021-07-16T01:58:42.911959 | 2020-11-15T07:18:19 | 2020-11-15T07:18:19 | 226,976,859 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | #!/usr/bin/python3
"""
This is an example of the matrix_divided function.
>>> matrix = [[1, 2, 3], [4, 5, 6]]
>>> print(matrix_divided(matrix, 3))
[[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]
"""
def matrix_divided(matrix, div):
"""
This function divides a matrix by an integer or float and returns a new
matrix divided by that number
"""
if (matrix == [] or matrix[0] == []):
raise TypeError('matrix must be a matrix (list of lists) of '
'integers/floats')
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if (
type(matrix[i][j]) is not int and
type(matrix[i][j]) is not float
):
raise TypeError('matrix must be a matrix (list of lists) of '
'integers/floats')
x = len(matrix[0])
for i in range(1, len(matrix)):
if len(matrix[i]) != x:
raise TypeError('Each row of the matrix must have the same size')
if div == 0:
raise ZeroDivisionError('division by zero')
if type(div) is not int and type(div) is not float:
raise TypeError('div must be a number')
newmat = matrix[:]
newmat = [
[float(round(newmat[i][j]/div, 2)) for j in range(len(newmat[i]))]
for i in range(len(newmat))]
return newmat
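# Added illustrative note (not part of the original module): besides the doctest in
# the module docstring, the validation above makes a zero divisor fail loudly, e.g.
#   >>> matrix_divided([[1, 2], [3, 4]], 0)
#   Traceback (most recent call last):
#       ...
#   ZeroDivisionError: division by zero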
| [
"[email protected]"
] | |
a42ee1435bedd3c4a0940ee63ffcaa7c0387407f | 612325535126eaddebc230d8c27af095c8e5cc2f | /depot_tools/git_upstream_diff.py | cc0a2006e585c1a00a443d4763335359281640b6 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
import subprocess2
import git_common as git
def main(args):
default_args = git.config_list('depot-tools.upstream-diff.default-args')
args = default_args + args
parser = argparse.ArgumentParser()
parser.add_argument('--wordwise', action='store_true', default=False,
help=(
'Print a colorized wordwise diff '
'instead of line-wise diff'))
opts, extra_args = parser.parse_known_args(args)
cur = git.current_branch()
if not cur or cur == 'HEAD':
print 'fatal: Cannot perform git-upstream-diff while not on a branch'
return 1
par = git.upstream(cur)
if not par:
print 'fatal: No upstream configured for branch \'%s\'' % cur
return 1
cmd = [git.GIT_EXE, 'diff', '--patience', '-C', '-C']
if opts.wordwise:
cmd += ['--word-diff=color', r'--word-diff-regex=(\w+|[^[:space:]])']
cmd += [git.get_or_create_merge_base(cur, par)]
cmd += extra_args
return subprocess2.check_call(cmd)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| [
"[email protected]"
] | |
2b55e707ba9ff684d0159528ae983b83dceb3b6d | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_22433.py | a406e0709451fe6478ee3ca985fe25f91ec65b95 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # How to disable button until check box is checked in pyqt?
connect(checkbox, SIGNAL(stateChanged(int)), button, SLOT(buttonStateChanged(int)));
| [
"[email protected]"
] | |
c5d99d7ef7f2e3f44b277992637c51bbbcdc00d1 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/TprimeTprime/TprimeTprimeToTHTHinc_M_625_TuneZ2star_8TeV-madgraph_cff.py | 4d932f949768263c528bf82c33419cee1ca13447 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 5,115 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=8 ! fourth generation (t4) fermions',
'MWID(8)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(8,1) = 625.0D0 ! tprime quarks mass',
'PMAS(8,2) = 6.25D0',
'PMAS(8,3) = 62.5D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(66,1)=0 ! g t4',
'MDME(67,1)=0 ! gamma t4',
'MDME(68,1)=0 ! Z0 t (2 : on for particle, off for anti-particle) ',
'MDME(69,1)=0 ! W d',
'MDME(70,1)=0 ! W s',
'MDME(71,1)=0 ! W b (3 : off for particle, on for particle) ',
'MDME(72,1)=0 ! W b4',
'KFDP(73,2)=6 ! defines H0 t',
'MDME(73,1)=1 ! h0 t4',
'MDME(74,1)=-1 ! H+ b',
'MDME(75,1)=-1 ! H+ b4',
'BRAT(66) = 0.0D0',
'BRAT(67) = 0.0D0',
'BRAT(68) = 0.0D0',
'BRAT(69) = 0.0D0',
'BRAT(70) = 0.0D0',
'BRAT(71) = 0.0D0',
'BRAT(72) = 0.0D0',
'BRAT(73) = 1.0D0',
'BRAT(74) = 0.0D0',
'BRAT(75) = 0.0D0',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
476f7082b9b260260f80644006a9ada67ee8fcb7 | 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36 | /homeassistant/components/color_extractor/__init__.py | 73e8e09101c38ea10b25ef93ae14eb5b08dbdd04 | [
"Apache-2.0"
] | permissive | jason0x43/home-assistant | 9114decaa8f7c2f1582f84e79dc06736b402b008 | 8bf6aba1cf44ee841de063755c935ea78040f399 | refs/heads/dev | 2023-03-04T01:14:10.257593 | 2022-01-01T12:11:56 | 2022-01-01T12:11:56 | 230,622,861 | 1 | 1 | Apache-2.0 | 2023-02-22T06:15:07 | 2019-12-28T14:45:43 | Python | UTF-8 | Python | false | false | 4,528 | py | """Module for color_extractor (RGB extraction from images) component."""
import asyncio
import io
import logging
from PIL import UnidentifiedImageError
import aiohttp
import async_timeout
from colorthief import ColorThief
import voluptuous as vol
from homeassistant.components.light import (
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SERVICE_TURN_ON as LIGHT_SERVICE_TURN_ON,
)
from homeassistant.core import ServiceCall
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import ATTR_PATH, ATTR_URL, DOMAIN, SERVICE_TURN_ON
_LOGGER = logging.getLogger(__name__)
# Extend the existing light.turn_on service schema
SERVICE_SCHEMA = vol.All(
cv.has_at_least_one_key(ATTR_URL, ATTR_PATH),
cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
vol.Exclusive(ATTR_PATH, "color_extractor"): cv.isfile,
vol.Exclusive(ATTR_URL, "color_extractor"): cv.url,
}
),
)
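# Illustrative service-call data accepted by the schema above (added example; the
# entity id and URL are placeholders, ATTR_URL stands for whatever literal key is
# defined in .const, and any other light.turn_on field such as brightness_pct may
# be mixed in as well):
#
#   {"entity_id": "light.living_room",
#    ATTR_URL: "https://example.com/poster.jpg",
#    "brightness_pct": 60}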
def _get_file(file_path):
"""Get a PIL acceptable input file reference.
Allows us to mock patch during testing to make BytesIO stream.
"""
return file_path
def _get_color(file_handler) -> tuple:
"""Given an image file, extract the predominant color from it."""
color_thief = ColorThief(file_handler)
# get_color returns a SINGLE RGB value for the given image
color = color_thief.get_color(quality=1)
_LOGGER.debug("Extracted RGB color %s from image", color)
return color
async def async_setup(hass, hass_config):
"""Set up services for color_extractor integration."""
async def async_handle_service(service_call: ServiceCall) -> None:
"""Decide which color_extractor method to call based on service."""
service_data = dict(service_call.data)
try:
if ATTR_URL in service_data:
image_type = "URL"
image_reference = service_data.pop(ATTR_URL)
color = await async_extract_color_from_url(image_reference)
elif ATTR_PATH in service_data:
image_type = "file path"
image_reference = service_data.pop(ATTR_PATH)
color = await hass.async_add_executor_job(
extract_color_from_path, image_reference
)
except UnidentifiedImageError as ex:
_LOGGER.error(
"Bad image from %s '%s' provided, are you sure it's an image? %s",
image_type,
image_reference,
ex,
)
return
if color:
service_data[ATTR_RGB_COLOR] = color
await hass.services.async_call(
LIGHT_DOMAIN, LIGHT_SERVICE_TURN_ON, service_data, blocking=True
)
hass.services.async_register(
DOMAIN,
SERVICE_TURN_ON,
async_handle_service,
schema=SERVICE_SCHEMA,
)
async def async_extract_color_from_url(url):
"""Handle call for URL based image."""
if not hass.config.is_allowed_external_url(url):
_LOGGER.error(
"External URL '%s' is not allowed, please add to 'allowlist_external_urls'",
url,
)
return None
_LOGGER.debug("Getting predominant RGB from image URL '%s'", url)
# Download the image into a buffer for ColorThief to check against
try:
session = aiohttp_client.async_get_clientsession(hass)
async with async_timeout.timeout(10):
response = await session.get(url)
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
_LOGGER.error("Failed to get ColorThief image due to HTTPError: %s", err)
return None
content = await response.content.read()
with io.BytesIO(content) as _file:
_file.name = "color_extractor.jpg"
_file.seek(0)
return _get_color(_file)
def extract_color_from_path(file_path):
"""Handle call for local file based image."""
if not hass.config.is_allowed_path(file_path):
_LOGGER.error(
"File path '%s' is not allowed, please add to 'allowlist_external_dirs'",
file_path,
)
return None
_LOGGER.debug("Getting predominant RGB from file path '%s'", file_path)
_file = _get_file(file_path)
return _get_color(_file)
return True
| [
"[email protected]"
] | |
1b7ce49e1222b0fe960952fc254de4119bfb963e | 80b7f2a10506f70477d8720e229d7530da2eff5d | /uhd_restpy/testplatform/sessions/ixnetwork/topology/spbsimedgetopologylist_5e9b551439bb252c1fa3e2c6948a1432.py | 82f7b45a6002ca0d6bd4fe30fda2e1c885a047d8 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 10,573 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class SpbSimEdgeTopologyList(Base):
"""SPB Simulated Edge Topology Configuration
The SpbSimEdgeTopologyList class encapsulates a required spbSimEdgeTopologyList resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'spbSimEdgeTopologyList'
_SDM_ATT_MAP = {
'Active': 'active',
'BaseVIDCount': 'baseVIDCount',
'CistExternalRootCost': 'cistExternalRootCost',
'CistRootId': 'cistRootId',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'NumberOfPorts': 'numberOfPorts',
'PortIdentifier': 'portIdentifier',
'TopologyId': 'topologyId',
'Vbit': 'vbit',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(SpbSimEdgeTopologyList, self).__init__(parent, list_op)
@property
def SpbSimEdgeBaseVidList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.spbsimedgebasevidlist_166a7ab8274498ee804810aa449de276.SpbSimEdgeBaseVidList): An instance of the SpbSimEdgeBaseVidList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.spbsimedgebasevidlist_166a7ab8274498ee804810aa449de276 import SpbSimEdgeBaseVidList
if len(self._object_properties) > 0:
if self._properties.get('SpbSimEdgeBaseVidList', None) is not None:
return self._properties.get('SpbSimEdgeBaseVidList')
return SpbSimEdgeBaseVidList(self)._select()
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def BaseVIDCount(self):
# type: () -> int
"""
Returns
-------
- number: Base VID Count(multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['BaseVIDCount'])
@property
def CistExternalRootCost(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): CIST External Root Cost
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CistExternalRootCost']))
@property
def CistRootId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): CIST Root Identifier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CistRootId']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfPorts(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of Ports
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumberOfPorts']))
@property
def PortIdentifier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Port Identifier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PortIdentifier']))
@property
def TopologyId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Topology Id
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TopologyId']))
@property
def Vbit(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable V Bit
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Vbit']))
def update(self, Name=None):
# type: (str) -> SpbSimEdgeTopologyList
"""Updates spbSimEdgeTopologyList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, BaseVIDCount=None, Count=None, DescriptiveName=None, Name=None):
# type: (int, int, str, str) -> SpbSimEdgeTopologyList
"""Finds and retrieves spbSimEdgeTopologyList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbSimEdgeTopologyList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all spbSimEdgeTopologyList resources from the server.
Args
----
- BaseVIDCount (number): Base VID Count(multiplier)
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching spbSimEdgeTopologyList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of spbSimEdgeTopologyList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the spbSimEdgeTopologyList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, CistExternalRootCost=None, CistRootId=None, NumberOfPorts=None, PortIdentifier=None, TopologyId=None, Vbit=None):
"""Base class infrastructure that gets a list of spbSimEdgeTopologyList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- CistExternalRootCost (str): optional regex of cistExternalRootCost
- CistRootId (str): optional regex of cistRootId
- NumberOfPorts (str): optional regex of numberOfPorts
- PortIdentifier (str): optional regex of portIdentifier
- TopologyId (str): optional regex of topologyId
- Vbit (str): optional regex of vbit
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
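# Illustrative access pattern for the generated class above (added sketch; `parent`
# stands for whichever NGPF object exposes the SpbSimEdgeTopologyList property, and
# the regex values are placeholders):
#
#   topo_list = parent.SpbSimEdgeTopologyList.find(Name="^SPB")
#   device_ids = topo_list.get_device_ids(TopologyId="^1$")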
| [
"[email protected]"
] | |
dba9130365a99707f7878bd20cab0e0664a2c04d | b55bfe14e10ff28569eebab73c3b1e509eec4e1a | /Starfish/model.py | 66f94063e69d1c6205e57d013bbd465d0ef8dc74 | [
"BSD-3-Clause"
] | permissive | ellipticalboy/Starfish | da7e440e9f433b32da0cc37030a636954e3942fb | 9d3c99021b7b7c4c90020162258801c590f2f11e | refs/heads/master | 2021-01-18T02:46:16.934126 | 2015-09-23T09:21:05 | 2015-09-23T09:21:05 | 34,518,507 | 0 | 0 | null | 2015-04-24T12:52:56 | 2015-04-24T12:52:55 | null | UTF-8 | Python | false | false | 34,513 | py | import numpy as np
from emcee import GibbsSampler, ParallelSampler
from . import constants as C
from .grid_tools import Interpolator
from .spectrum import ModelSpectrum, ModelSpectrumHA
import json
import h5py
import logging
import matplotlib.pyplot as plt
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def plot_walkers(filename, samples, labels=None):
ndim = len(samples[0, :])
figsize = (12, ndim * 1.8)
fig, ax = plt.subplots(nrows=ndim, sharex=True, figsize=figsize)
for i in range(ndim):
ax[i].plot(samples[:,i])
if labels is not None:
ax[i].set_ylabel(labels[i])
ax[-1].set_xlabel("Sample number")
fig.savefig(filename)
plt.close(fig)
class ModelEncoder(json.JSONEncoder):
'''
Designed to serialize an instance of o=Model() to JSON
'''
def default(self, o):
try:
#We turn Model into a hierarchical dictionary, which will serialize to JSON
mydict = {"stellar_tuple":o.stellar_tuple, "cheb_tuple": o.cheb_tuple, "cov_tuple": o.cov_tuple,
"region_tuple": o.region_tuple, "stellar_params": o.stellar_params, "orders": {}}
#Determine the list of orders
orders = o.DataSpectrum.orders
#for each order, then instantiate an order dictionary
for i,order in enumerate(orders):
#Will eventually be mydict['orders'] = {"22":order_dict, "23:order_dict, ...}
order_dict = {}
order_model = o.OrderModels[i]
order_dict["cheb"] = order_model.cheb_params
order_dict["global_cov"] = order_model.global_cov_params
#Now determine if we need to add any regions
order_dict["regions"] = order_model.get_regions_dict()
mydict['orders'].update({str(order): order_dict})
except TypeError:
pass
else:
return mydict
# Let the base class default method raise the TypeError, if there is one
return json.JSONEncoder.default(self, o)
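# Rough shape of the JSON produced via this encoder (added illustration; the order
# keys ("22", "23", ...) and the contents of "regions" depend on the model instance
# being serialized):
#
#   {"stellar_tuple": [...], "cheb_tuple": [...], "cov_tuple": [...],
#    "region_tuple": [...], "stellar_params": {...},
#    "orders": {"22": {"cheb": {...}, "global_cov": {...}, "regions": {...}}, ...}}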
class Model:
'''
    Container class to create and bring together all of the relevant data and models to aid in evaluation.
:param DataSpectrum: the data to fit
:type DataSpectrum: :obj:`spectrum.DataSpectrum` object
:param Instrument: the instrument with which the data was acquired
:type Instrument: :obj:`grid_tools.Instrument` object
:param HDF5Interface: the interface to the synthetic stellar library
:type HDF5Interface: :obj:`grid_tools.HDF5Interface` object
:param stellar_tuple: describes the order of parameters. If ``alpha`` is missing, :obj:``grid_tools.Interpolator`` is trilinear.
:type stellar_tuple: tuple
'''
@classmethod
def from_json(cls, filename, DataSpectrum, Instrument, HDF5Interface, ErrorHDF5Interface):
'''
Instantiate from a JSON file.
'''
#Determine tuples from the JSON output
f = open(filename, "r")
read = json.load(f)
f.close()
#Read DataSpectrum, Instrument, HDF5Interface, stellar_tuple, cov_tuple, and region_tuple
stellar_tuple = tuple(read['stellar_tuple'])
cheb_tuple = tuple(read['cheb_tuple'])
cov_tuple = tuple(read['cov_tuple'])
region_tuple = tuple(read['region_tuple'])
#Initialize the Model object
model = cls(DataSpectrum, Instrument, HDF5Interface, ErrorHDF5Interface, stellar_tuple=stellar_tuple,
cheb_tuple=cheb_tuple, cov_tuple=cov_tuple, region_tuple=region_tuple)
#Update all of the parameters so covariance matrix uploads
#1) update stellar parameters
model.update_Model(read['stellar_params'])
#2) Figure out how many orders, and for each order
orders_dict = read["orders"]
#print("orders_dict is", orders_dict)
orders = [int(i) for i in orders_dict.keys()]
orders.sort()
fake_priors = {"sigma0": 5., "mu_width": 2., "sigma_knee" : 150, "frac_global":0.5}
for i, order in enumerate(orders):
order_model = model.OrderModels[i]
order_dict = orders_dict[str(order)]
#print("order_dict is", order_dict)
#2.1) update cheb and global cov parametersorder_dict = orders_dict[order]
order_model.update_Cheb(order_dict['cheb'])
order_model.update_Cov(order_dict['global_cov'])
#2.2) instantiate and create all regions, if any exist
regions_dict = order_dict['regions']
regions = [int(i) for i in regions_dict.keys()]
regions.sort()
if len(regions_dict) > 0:
#Create regions, otherwise skip
CovMatrix = order_model.CovarianceMatrix
for i, region in enumerate(regions):
print("creating region ", i, region, regions_dict[str(region)])
CovMatrix.create_region(regions_dict[str(region)], fake_priors)
#Now update the stellar model again so it accounts for the Chebyshevs when downsampling
model.update_Model(read['stellar_params'])
return model
def __init__(self, DataSpectrum, Instrument, Emulator, stellar_tuple,
cheb_tuple, cov_tuple, region_tuple, outdir="", max_v=20, ismaster=False, debug=False):
self.DataSpectrum = DataSpectrum
self.ismaster = ismaster #Is this the first model instantiated?
self.stellar_tuple = stellar_tuple
self.cheb_tuple = cheb_tuple
self.cov_tuple = cov_tuple
self.region_tuple = region_tuple
self.outdir = outdir
self.debug = debug
self.orders = self.DataSpectrum.orders
self.norders = self.DataSpectrum.shape[0]
#Determine whether `alpha` is in the `stellar_tuple`, then choose trilinear.
if 'alpha' not in self.stellar_tuple:
trilinear = True
else:
trilinear = False
Emulator.determine_chunk_log(self.DataSpectrum.wls.flatten()) #Possibly truncate the grid
self.ModelSpectrum = ModelSpectrum(Emulator, self.DataSpectrum, Instrument)
self.stellar_params = None
self.stellar_params_last = None
self.logPrior = 0.0
self.logPrior_last = 0.0
self.logger = logging.getLogger(self.__class__.__name__)
if self.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
#Now create a a list which contains an OrderModel for each order
self.OrderModels = [OrderModel(self.ModelSpectrum, self.DataSpectrum, index, max_v=max_v,
npoly=len(self.cheb_tuple), debug=self.debug)
for index in range(self.norders)]
def zip_stellar_p(self, p):
return dict(zip(self.stellar_tuple, p))
def zip_Cheb_p(self, p):
return dict(zip(self.cheb_tuple, p))
def zip_Cov_p(self, p):
return dict(zip(self.cov_tuple, p))
def zip_Region_p(self, p):
return dict(zip(self.region_tuple, p))
def update_Model(self, params):
'''
Update the model to reflect the stellar parameters
'''
self.stellar_params_last = self.stellar_params
self.stellar_params = params
self.ModelSpectrum.update_all(params)
#print("ModelSpectrum.update_all")
if self.ismaster:
self.logPrior = self.evaluate_logPrior(params)
#Since the ModelSpectrum fluxes have been updated, also update the interpolation errors
#print("Sum of errors is {}".format(np.sum(model_errs)))
# for orderModel in self.OrderModels:
# errs = self.ModelSpectrum.downsampled_errors[:, orderModel.index, :].copy()
# assert errs.flags["C_CONTIGUOUS"], "Not C contiguous"
# orderModel.CovarianceMatrix.update_interp_errs(errs)
def revert_Model(self):
'''
Undo the most recent change to the stellar parameters
'''
#Reset stellar_params
self.stellar_params = self.stellar_params_last
if self.ismaster:
self.logPrior = self.logPrior_last
#Reset downsampled flux
self.ModelSpectrum.revert_flux()
#Since the interp_errors have been updated, revert them now
# for orderModel in self.OrderModels:
# orderModel.CovarianceMatrix.revert_interp()
def get_data(self):
'''
Returns a DataSpectrum object.
'''
return self.DataSpectrum
def evaluate(self):
'''
Compare the different data and models.
'''
self.logger.debug("evaluating model {}".format(self))
lnps = np.empty((self.norders,))
for i in range(self.norders):
#Evaluate using the current CovarianceMatrix
lnps[i] = self.OrderModels[i].evaluate()
return np.sum(lnps) + self.logPrior
def evaluate_logPrior(self, params):
'''
Define the prior here
'''
logg = params["logg"]
logg_prior = -0.5 * (logg - 5.0)**2/(0.05)**2
return logg_prior
def to_json(self, fname="model.json"):
'''
Write all of the available parameters to a JSON file so that we may go back and re-create the model.
'''
f = open(self.outdir + fname, 'w')
json.dump(self, f, cls=ModelEncoder, indent=2, sort_keys=True)
f.close()
class ModelHA:
'''
This is for testing purposes.
    Container class to create and bring together all of the relevant data and models to aid in evaluation.
:param DataSpectrum: the data to fit
:type DataSpectrum: :obj:`spectrum.DataSpectrum` object
:param Instrument: the instrument with which the data was acquired
:type Instrument: :obj:`grid_tools.Instrument` object
:param HDF5Interface: the interface to the synthetic stellar library
:type HDF5Interface: :obj:`grid_tools.HDF5Interface` object
:param stellar_tuple: describes the order of parameters. If ``alpha`` is missing, :obj:``grid_tools.Interpolator`` is trilinear.
:type stellar_tuple: tuple
'''
@classmethod
def from_json(cls, filename, DataSpectrum, Instrument, HDF5Interface):
'''
Instantiate from a JSON file.
'''
#Determine tuples from the JSON output
f = open(filename, "r")
read = json.load(f)
f.close()
#Read DataSpectrum, Instrument, HDF5Interface, stellar_tuple, cov_tuple, and region_tuple
stellar_tuple = tuple(read['stellar_tuple'])
cheb_tuple = tuple(read['cheb_tuple'])
cov_tuple = tuple(read['cov_tuple'])
region_tuple = tuple(read['region_tuple'])
#Initialize the Model object
model = cls(DataSpectrum, Instrument, HDF5Interface, stellar_tuple=stellar_tuple, cheb_tuple=cheb_tuple,
cov_tuple=cov_tuple, region_tuple=region_tuple)
#Update all of the parameters so covariance matrix uploads
#1) update stellar parameters
model.update_Model(read['stellar_params'])
#2) Figure out how many orders, and for each order
orders_dict = read["orders"]
#print("orders_dict is", orders_dict)
orders = [int(i) for i in orders_dict.keys()]
orders.sort()
for i, order in enumerate(orders):
order_model = model.OrderModels[i]
order_dict = orders_dict[str(order)]
#print("order_dict is", order_dict)
#2.1) update cheb and global cov parametersorder_dict = orders_dict[order]
order_model.update_Cheb(order_dict['cheb'])
order_model.update_Cov(order_dict['global_cov'])
#2.2) instantiate and create all regions, if any exist
regions_dict = order_dict['regions']
regions = [int(i) for i in regions_dict.keys()]
regions.sort()
if len(regions_dict) > 0:
#Create regions, otherwise skip
CovMatrix = order_model.CovarianceMatrix
for i, region in enumerate(regions):
print("creating region ", i, region, regions_dict[str(region)])
CovMatrix.create_region(regions_dict[str(region)])
#Now update the stellar model again so it accounts for the Chebyshevs when downsampling
model.update_Model(read['stellar_params'])
return model
def __init__(self, DataSpectrum, Instrument, HDF5Interface, stellar_tuple, cheb_tuple, cov_tuple, region_tuple, outdir=""):
self.DataSpectrum = DataSpectrum
self.stellar_tuple = stellar_tuple
self.cheb_tuple = cheb_tuple
self.cov_tuple = cov_tuple
self.region_tuple = region_tuple
self.outdir = outdir
self.orders = self.DataSpectrum.orders
self.norders = self.DataSpectrum.shape[0]
#Determine whether `alpha` is in the `stellar_tuple`, then choose trilinear.
if 'alpha' not in self.stellar_tuple:
trilinear = True
else:
trilinear = False
myInterpolator = Interpolator(HDF5Interface, self.DataSpectrum, trilinear=trilinear, log=False)
self.ModelSpectrum = ModelSpectrumHA(myInterpolator, Instrument)
self.stellar_params = None
#Now create a a list which contains an OrderModel for each order
self.OrderModels = [OrderModel(self.ModelSpectrum, self.DataSpectrum, index) for index in range(self.norders)]
def zip_stellar_p(self, p):
return dict(zip(self.stellar_tuple, p))
def zip_Cheb_p(self, p):
return dict(zip(self.cheb_tuple, p))
def zip_Cov_p(self, p):
return dict(zip(self.cov_tuple, p))
def zip_Region_p(self, p):
return dict(zip(self.region_tuple, p))
def update_Model(self, params):
self.ModelSpectrum.update_all(params)
self.stellar_params = params
#Since the ModelSpectrum fluxes have been updated, also update the interpolation errors
model_errs = self.ModelSpectrum.downsampled_errors
for orderModel in self.OrderModels:
errspecs = np.ascontiguousarray(model_errs[:, orderModel.index, :])
orderModel.CovarianceMatrix.update_interp_errs(errspecs)
def get_data(self):
'''
Returns a DataSpectrum object.
'''
return self.DataSpectrum
def evaluate(self):
'''
Compare the different data and models.
'''
#Incorporate priors using self.ModelSpectrum.params, self.ChebyshevSpectrum.c0s, cns, self.CovarianceMatrix.params, etc...
lnps = np.empty((self.norders,))
for i in range(self.norders):
#Correct the warp of the model using the ChebyshevSpectrum
# model_fl = self.OrderModels[i].ChebyshevSpectrum.k * self.ModelSpectrum.downsampled_fls[i]
#Evaluate using the current CovarianceMatrix
# lnps[i] = self.OrderModels[i].evaluate(model_fl)
lnps[i] = self.OrderModels[i].evaluate()
return np.sum(lnps)
def to_json(self, fname="model.json"):
'''
Write all of the available parameters to a JSON file so that we may go back and re-create the model.
'''
f = open(self.outdir + fname, 'w')
json.dump(self, f, cls=ModelEncoder, indent=2, sort_keys=True)
f.close()
class Sampler(GibbsSampler):
'''
Subclasses the GibbsSampler in emcee
:param starting_param_dict: the dictionary of starting parameters
:param cov: the MH proposal
:param revertfn:
:param acceptfn:
:param debug:
'''
def __init__(self, **kwargs):
self.dim = len(self.param_tuple)
#p0 = np.empty((self.dim,))
#starting_param_dict = kwargs.get("starting_param_dict")
#for i,param in enumerate(self.param_tuple):
# p0[i] = starting_param_dict[param]
kwargs.update({"dim":self.dim})
#self.spectra_list = kwargs.get("spectra_list", [0])
super(Sampler, self).__init__(**kwargs)
#Each subclass will have to overwrite how it parses the param_dict into the correct order
#and sets the param_tuple
#SUBCLASS here and define self.param_tuple
#SUBCLASS here and define self.lnprob
#SUBCLASS here and do self.revertfn
#then do super().__init__() to call the following code
self.outdir = kwargs.get("outdir", "")
def startdict_to_tuple(self, startdict):
raise NotImplementedError("To be implemented by a subclass!")
def zip_p(self, p):
return dict(zip(self.param_tuple, p))
def lnprob(self):
raise NotImplementedError("To be implemented by a subclass!")
def revertfn(self):
raise NotImplementedError("To be implemented by a subclass!")
def acceptfn(self):
raise NotImplementedError("To be implemented by a subclass!")
def write(self):
'''
Write all of the relevant sample output to an HDF file.
Write the lnprobability to an HDF file.
flatchain
acceptance fraction
tuple parameters as an attribute in the header from self.param_tuple
The actual HDF5 file is structured as follows
/
stellar parameters.flatchain
00/
...
22/
23/
global_cov.flatchain
regions/
region1.flatchain
Everything can be saved in the dataset self.fname
'''
filename = self.outdir + "flatchains.hdf5"
self.logger.debug("Opening {} for writing HDF5 flatchains".format(filename))
hdf5 = h5py.File(filename, "w")
samples = self.flatchain
self.logger.debug("Creating dataset with fname:{}".format(self.fname))
dset = hdf5.create_dataset(self.fname, samples.shape, compression='gzip', compression_opts=9)
self.logger.debug("Storing samples and header attributes.")
dset[:] = samples
dset.attrs["parameters"] = "{}".format(self.param_tuple)
dset.attrs["acceptance"] = "{}".format(self.acceptance_fraction)
dset.attrs["acor"] = "{}".format(self.acor)
dset.attrs["commit"] = "{}".format(C.get_git_commit())
hdf5.close()
#lnprobability is the lnprob at each sample
filename = self.outdir + "lnprobs.hdf5"
self.logger.debug("Opening {} for writing HDF5 lnprobs".format(filename))
hdf5 = h5py.File(filename, "w")
lnprobs = self.lnprobability
dset = hdf5.create_dataset(self.fname, samples.shape[:1], compression='gzip', compression_opts=9)
dset[:] = lnprobs
dset.attrs["commit"] = "{}".format(C.get_git_commit())
hdf5.close()
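    # Added read-back sketch (assumes h5py is available in the analysis environment
    # and that self.fname was e.g. "stellar"; mirrors the layout written above):
    #
    #   import h5py
    #   with h5py.File(outdir + "flatchains.hdf5", "r") as hdf5:
    #       samples = hdf5["stellar"][:]                 # (nsamples, ndim)
    #       params = hdf5["stellar"].attrs["parameters"]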
def plot(self, triangle_plot=False):
'''
Generate the relevant plots once the sampling is done.
'''
samples = self.flatchain
plot_walkers(self.outdir + self.fname + "_chain_pos.png", samples, labels=self.param_tuple)
if triangle_plot:
import triangle
figure = triangle.corner(samples, labels=self.param_tuple, quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_args={"fontsize": 12})
figure.savefig(self.outdir + self.fname + "_triangle.png")
plt.close(figure)
class PSampler(ParallelSampler):
'''
    Subclasses the ParallelSampler in emcee
:param starting_param_dict: the dictionary of starting parameters
:param cov: the MH proposal
:param revertfn:
:param acceptfn:
:param debug:
'''
def __init__(self, **kwargs):
self.dim = len(self.param_tuple)
#p0 = np.empty((self.dim,))
#starting_param_dict = kwargs.get("starting_param_dict")
#for i,param in enumerate(self.param_tuple):
# p0[i] = starting_param_dict[param]
kwargs.update({"dim":self.dim})
#self.spectra_list = kwargs.get("spectra_list", [0])
super(PSampler, self).__init__(**kwargs)
#Each subclass will have to overwrite how it parses the param_dict into the correct order
#and sets the param_tuple
#SUBCLASS here and define self.param_tuple
#SUBCLASS here and define self.lnprob
#SUBCLASS here and do self.revertfn
#then do super().__init__() to call the following code
self.outdir = kwargs.get("outdir", "")
def startdict_to_tuple(self, startdict):
raise NotImplementedError("To be implemented by a subclass!")
def zip_p(self, p):
return dict(zip(self.param_tuple, p))
def lnprob(self):
raise NotImplementedError("To be implemented by a subclass!")
def revertfn(self):
raise NotImplementedError("To be implemented by a subclass!")
def acceptfn(self):
raise NotImplementedError("To be implemented by a subclass!")
def write(self):
'''
Write all of the relevant sample output to an HDF file.
Write the lnprobability to an HDF file.
flatchain
acceptance fraction
tuple parameters as an attribute in the header from self.param_tuple
The actual HDF5 file is structured as follows
/
stellar parameters.flatchain
00/
...
22/
23/
global_cov.flatchain
regions/
region1.flatchain
Everything can be saved in the dataset self.fname
'''
filename = self.outdir + "flatchains.hdf5"
self.logger.debug("Opening {} for writing HDF5 flatchains".format(filename))
hdf5 = h5py.File(filename, "w")
samples = self.flatchain
self.logger.debug("Creating dataset with fname:{}".format(self.fname))
dset = hdf5.create_dataset(self.fname, samples.shape, compression='gzip', compression_opts=9)
self.logger.debug("Storing samples and header attributes.")
dset[:] = samples
dset.attrs["parameters"] = "{}".format(self.param_tuple)
dset.attrs["acceptance"] = "{}".format(self.acceptance_fraction)
dset.attrs["acor"] = "{}".format(self.acor)
dset.attrs["commit"] = "{}".format(C.get_git_commit())
hdf5.close()
#lnprobability is the lnprob at each sample
filename = self.outdir + "lnprobs.hdf5"
self.logger.debug("Opening {} for writing HDF5 lnprobs".format(filename))
        hdf5 = h5py.File(filename, "w")  # "w" creates the file, truncating it if it already exists
lnprobs = self.lnprobability
dset = hdf5.create_dataset(self.fname, samples.shape[:1], compression='gzip', compression_opts=9)
dset[:] = lnprobs
dset.attrs["commit"] = "{}".format(C.get_git_commit())
hdf5.close()
def plot(self, triangle_plot=False):
'''
Generate the relevant plots once the sampling is done.
'''
samples = self.flatchain
plot_walkers(self.outdir + self.fname + "_chain_pos.png", samples, labels=self.param_tuple)
if triangle_plot:
import triangle
figure = triangle.corner(samples, labels=self.param_tuple, quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_args={"fontsize": 12})
figure.savefig(self.outdir + self.fname + "_triangle.png")
plt.close(figure)
class StellarSampler(PSampler):
"""
Subclasses the Sampler specifically for stellar parameters
"""
def __init__(self, **kwargs):
'''
:param pconns: Collection of parent ends of the PIPEs
:type pconns: dict
:param starting_param_dict:
the dictionary of starting parameters
:param cov:
the MH proposal
:param fix_logg:
fix logg? If so, to what value?
:param debug:
:param args: []
'''
self.fix_logg = kwargs.get("fix_logg", False)
        starting_param_dict = kwargs.get("starting_param_dict")
        self.param_tuple = self.startdict_to_tuple(starting_param_dict)
        print("param_tuple is {}".format(self.param_tuple))
        self.p0 = np.array([starting_param_dict[key] for key in self.param_tuple])
kwargs.update({"p0":self.p0, "revertfn":self.revertfn, "acceptfn": self.acceptfn, "lnprobfn":self.lnprob})
super(StellarSampler, self).__init__(**kwargs)
#self.pconns is a dictionary of parent connections to each PIPE connecting to the child processes.
self.spectrum_ids = sorted(self.pconns.keys())
self.fname = "stellar"
def startdict_to_tuple(self, startdict):
tup = ()
for param in C.stellar_parameters:
#check if param is in keys, if so, add to the tuple
if param in startdict:
tup += (param,)
return tup
def reset(self):
super(StellarSampler, self).reset()
def revertfn(self):
'''
Revert the model to the previous state of parameters, in the case of a rejected MH proposal.
'''
self.logger.debug("reverting stellar parameters")
self.prior = self.prior_last
#Decide we don't want these stellar params. Tell the children to reject the proposal.
for pconn in self.pconns.values():
pconn.send(("DECIDE", False))
def acceptfn(self):
'''
Execute this if the MH proposal is accepted.
'''
self.logger.debug("accepting stellar parameters")
#Decide we do want to keep these stellar params. Tell the children to accept the proposal.
for pconn in self.pconns.values():
pconn.send(("DECIDE", True))
def lnprob(self, p):
# We want to send the same stellar parameters to each model,
# but also send the different vz and logOmega parameters
# to the separate spectra, based upon spectrum_id.
#self.logger.debug("StellarSampler lnprob p is {}".format(p))
#Extract only the temp, logg, Z, vsini parameters
if not self.fix_logg:
params = self.zip_p(p[:4])
others = p[4:]
else:
#Coming in as temp, Z, vsini, vz, logOmega...
params = self.zip_p(p[:3])
others = p[3:]
params.update({"logg": self.fix_logg})
# Prior
self.prior_last = self.prior
logg = params["logg"]
self.prior = -0.5 * (logg - 5.0)**2/(0.05)**2
#others should now be either [vz, logOmega] or [vz0, logOmega0, vz1, logOmega1, ...] etc. Always div by 2.
#split p up into [vz, logOmega], [vz, logOmega] pairs that update the other parameters.
#mparams is now a list of parameter dictionaries
#Now, pack up mparams into a dictionary to send the right stellar parameters to the right subprocesses
mparams = {}
for (spectrum_id, order_id), (vz, logOmega) in zip(self.spectrum_ids, grouper(others, 2)):
p = params.copy()
p.update({"vz":vz, "logOmega":logOmega})
mparams[spectrum_id] = p
self.logger.debug("updated lnprob params: {}".format(mparams))
lnps = np.empty((self.nprocs,))
#Distribute the calculation to each process
self.logger.debug("Distributing params to children")
for ((spectrum_id, order_id), pconn) in self.pconns.items():
#Parse the parameters into what needs to be sent to each Model here.
pconn.send(("LNPROB", mparams[spectrum_id]))
#Collect the answer from each process
self.logger.debug("Collecting params from children")
for i, pconn in enumerate(self.pconns.values()):
lnps[i] = pconn.recv()
self.logger.debug("lnps : {}".format(lnps))
s = np.sum(lnps)
self.logger.debug("sum lnps {}".format(s))
return s + self.prior
class NuisanceSampler(Sampler):
def __init__(self, **kwargs):
'''
:param OrderModel: the parallel.OrderModel instance
:param starting_param_dict: the dictionary of starting parameters
:param cov:
the MH proposal
:param debug:
:param args: []
'''
starting_param_dict = kwargs.get("starting_param_dict")
self.param_tuple = self.startdict_to_tuple(starting_param_dict)
print("param_tuple is {}".format(self.param_tuple))
#print("param_tuple length {}".format(len(self.param_tuple)))
chebs = [starting_param_dict["cheb"][key] for key in self.cheb_tup]
covs = [starting_param_dict["cov"][key] for key in self.cov_tup]
regions = starting_param_dict["regions"]
#print("initializing {}".format(regions))
regs = [regions[id][kk] for id in sorted(regions) for kk in C.cov_region_parameters]
#print("regs {}".format(regs))
self.p0 = np.array(chebs + covs + regs)
kwargs.update({"p0":self.p0, "revertfn":self.revertfn, "lnprobfn":self.lnprob})
super(NuisanceSampler, self).__init__(**kwargs)
self.model = kwargs.get("OrderModel")
spectrum_id, order_id = self.model.id
order = kwargs.get("order", order_id)
#self.fname = "{}/{}/{}".format(spectrum_id, order, "nuisance")
self.fname = "nuisance"
self.params = None
self.prior_params = kwargs.get("prior_params", None)
if self.prior_params:
self.sigma0 = self.prior_params["regions"]["sigma0"]
self.mus = self.prior_params["regions"]["mus"]
self.mu_width = self.prior_params["regions"]["mu_width"]
self.sigma_knee = self.prior_params["regions"]["sigma_knee"]
self.frac_global = self.prior_params["regions"]["frac_global"]
def startdict_to_tuple(self, startdict):
#This is a little more tricky than the stellar parameters.
#How are the keys stored and passed in the dictionary?
#{"cheb": [c0, c1, c2, ..., cn], "cov": [sigAmp, logAmp, l],
# "regions":{0: [logAmp, ], 1: [], N:[] }}
#Serialize the cheb parameters
self.ncheb = len(startdict["cheb"])
self.cheb_tup = ("logc0",) + tuple(["c{}".format(i) for i in range(1, self.ncheb)])
#Serialize the covariance parameters
self.ncov = 3
cov_tup = ()
for param in C.cov_global_parameters:
#check if param is in keys, if so, add to the tuple
if param in startdict["cov"]:
cov_tup += (param,)
self.cov_tup = cov_tup
regions_tup = ()
self.regions = startdict.get("regions", None)
if self.regions:
self.nregions = len(self.regions)
for key in sorted(self.regions.keys()):
for kk in C.cov_region_parameters:
regions_tup += ("r{:0>2}-{}".format(key,kk),)
self.regions_tup = regions_tup
else:
self.nregions = 0
self.regions_tup = ()
tup = self.cheb_tup + self.cov_tup + self.regions_tup
#This should look like
#tup = ("c0", "c1", ..., "cn", "sigAmp", "logAmp", "l", "r00_logAmp", "r00_mu", "r00_sigma",
# "r01_logAmp", ..., "rNN_sigma")
return tup
def zip_p(self, p):
'''
Convert the vector to a dictionary
'''
cheb = dict(zip(self.cheb_tup, p[:self.ncheb]))
cov = dict(zip(self.cov_tup, p[self.ncheb:self.ncheb+self.ncov]))
regions = p[-self.nregions*3:]
rdict = {}
for i in range(self.nregions):
rdict[i] = dict(zip(("logAmp", "mu", "sigma"), regions[i*3:3*(i+1)]))
params = {"cheb":cheb, "cov":cov, "regions":rdict}
return params
def revertfn(self):
self.logger.debug("reverting model")
self.model.prior = self.prior_last
self.params = self.params_last
self.model.revert_nuisance()
def lnprob(self, p):
self.params_last = self.params
params = self.zip_p(p)
self.params = params
self.logger.debug("Updating nuisance params {}".format(params))
# Nuisance parameter priors implemented here
self.prior_last = self.model.prior
# Region parameter priors implemented here
if self.nregions > 0:
regions = params["regions"]
keys = sorted(regions)
#Unpack the region parameters into a vector of mus, amps, and sigmas
amps = 10**np.array([regions[key]["logAmp"] for key in keys])
cov_amp = 10**params["cov"]["logAmp"]
#First check to make sure that amplitude can't be some factor less than the global covariance
if np.any(amps < (cov_amp * self.frac_global)):
return -np.inf
mus = np.array([regions[key]["mu"] for key in keys])
sigmas = np.array([regions[key]["sigma"] for key in keys])
#Make sure the region hasn't strayed too far from the original specification
if np.any(np.abs(mus - self.mus) > self.sigma0):
# The region has strayed too far from the original specification
return -np.inf
#Use a Gaussian prior on mu, that it keeps the region within the original setting.
# 1/(sqrt(2pi) * sigma) exp(-0.5 (mu-x)^2/sigma^2)
#-ln(sigma * sqrt(2 pi)) - 0.5 (mu - x)^2 / sigma^2
#width = 0.04
            lnGauss = np.sum(-0.5 * np.abs(mus - self.mus)**2/self.mu_width**2 -
                             np.log(self.mu_width * np.sqrt(2. * np.pi)))
# Use a ln(logistic) function on sigma, that is flat before the knee and dies off for anything
# greater, to prevent dilution into global cov kernel
lnLogistic = np.sum(np.log(-1./(1. + np.exp(self.sigma_knee - sigmas)) + 1.))
self.model.prior = lnLogistic + lnGauss
try:
self.model.update_nuisance(params)
lnp = self.model.evaluate() # also sets OrderModel.lnprob to proposed value. Includes self.model.prior
return lnp
except C.ModelError:
return -np.inf
def main():
print("Starting main of model")
pass
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
db10855a9829a6b7fc7b3239048bf8d6e30c8849 | 83e18f5d4fcd7084defb32981337a8f9b646c4c7 | /python/91.decode-ways.py | 926f10a41f2cdbf2fa346422937918a04b9d8437 | [
"MIT"
] | permissive | Zhenye-Na/leetcode | 709037a318e1be7e6ab92751f8695d888900591a | 18d91a6ba813f91531b04632563212dfde2cceb9 | refs/heads/master | 2023-04-10T07:06:06.502224 | 2023-04-01T00:18:44 | 2023-04-01T00:18:44 | 145,656,854 | 19 | 9 | MIT | 2022-05-16T03:14:02 | 2018-08-22T04:39:25 | Python | UTF-8 | Python | false | false | 2,549 | py | #
# @lc app=leetcode id=91 lang=python3
#
# [91] Decode Ways
#
# https://leetcode.com/problems/decode-ways/description/
#
# algorithms
# Medium (27.50%)
# Likes: 4791
# Dislikes: 3513
# Total Accepted: 593.9K
# Total Submissions: 2.1M
# Testcase Example: '"12"'
#
# A message containing letters from A-Z can be encoded into numbers using the
# following mapping:
#
#
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
#
#
# To decode an encoded message, all the digits must be grouped then mapped back
# into letters using the reverse of the mapping above (there may be multiple
# ways). For example, "11106" can be mapped into:
#
#
# "AAJF" with the grouping (1 1 10 6)
# "KJF" with the grouping (11 10 6)
#
#
# Note that the grouping (1 11 06) is invalid because "06" cannot be mapped
# into 'F' since "6" is different from "06".
#
# Given a string s containing only digits, return the number of ways to decode
# it.
#
# The answer is guaranteed to fit in a 32-bit integer.
#
#
# Example 1:
#
#
# Input: s = "12"
# Output: 2
# Explanation: "12" could be decoded as "AB" (1 2) or "L" (12).
#
#
# Example 2:
#
#
# Input: s = "226"
# Output: 3
# Explanation: "226" could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2
# 2 6).
#
#
# Example 3:
#
#
# Input: s = "0"
# Output: 0
# Explanation: There is no character that is mapped to a number starting with
# 0.
# The only valid mappings with 0 are 'J' -> "10" and 'T' -> "20", neither of
# which start with 0.
# Hence, there are no valid ways to decode this since all digits need to be
# mapped.
#
#
# Example 4:
#
#
# Input: s = "06"
# Output: 0
# Explanation: "06" cannot be mapped to "F" because of the leading zero ("6" is
# different from "06").
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 100
# s contains only digits and may contain leading zero(s).
#
#
#
# @lc code=start
class Solution:
def numDecodings(self, s: str) -> int:
s = "#" + s
n = len(s)
dp = [0 for _ in range(n)]
dp[0] = 1
if s[1] == "0":
return 0
else:
dp[1] = 1
for i in range(2, n):
if s[i] == "0":
if s[i - 1] == "1" or s[i - 1] == "2":
dp[i] += dp[i - 2]
else:
return 0
else:
# s[i] = 1 ... 9
dp[i] += dp[i - 1]
                if s[i - 1] == "1" or (s[i - 1] == "2" and int(s[i]) <= 6):
dp[i] += dp[i - 2]
return dp[n - 1]
# @lc code=end
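# Quick sanity checks against the examples above (added, illustrative only; not part
# of the submitted solution):
#   Solution().numDecodings("12")   # expected 2
#   Solution().numDecodings("226")  # expected 3
#   Solution().numDecodings("06")   # expected 0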
| [
"[email protected]"
] | |
b042229ecf57d55791109c255c5332fd28a2c071 | c1a1d21ff56175c00f89cfb721f3eec1575e1b2e | /code/python/leetcode/pascals-triangle-ii.py | 36f76aa085ab7d4d90bb6611a02743c65d565a16 | [] | no_license | zhuzeyu22/cowry | ee8501049447b694d35cce88392405610334382e | e135038caff7fc0743e33525413d415ac69ac898 | refs/heads/master | 2020-05-29T11:53:10.981688 | 2016-06-02T14:18:31 | 2016-06-02T14:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | class Solution:
# @return a list of lists of integers
def generate(self, numRows):
if numRows == 0:
return []
l = [[1],]
while len(l) < numRows:
nrow = [1] + [0] * (len(l)-1) + [1]
for i in range(1, len(nrow) - 1):
# print i, nrow, l
nrow[i] += l[-1][i-1] + l[-1][i]
l.append(nrow)
return l
def getRow(self, rowIndex):
return self.generate(rowIndex+1)[-1]
print Solution().getRow(3) | [
"[email protected]"
] | |
8904c1ec13f8e7ca960fa0862d9094244fd30a15 | 5ebfbdf37806f8c571b85e70c755e274b7ec3b92 | /tf/compose/modify.py | 0da2bb1bc637c92884445d19c69dee7ae38960c6 | [
"MIT"
] | permissive | moranegg/text-fabric | 4992d81a8038a20e4381d17e0b5e9e08cf73ff72 | d9214b7d60991189682ad91d3fc5588410462f24 | refs/heads/master | 2020-09-24T23:59:29.873507 | 2019-10-31T16:37:59 | 2019-10-31T16:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,760 | py | import collections
from ..fabric import Fabric
from ..core.data import WARP
from ..core.timestamp import Timestamp
from ..core.helpers import itemize, isInt, collectFormats, dirEmpty
OTYPE = WARP[0]
OSLOTS = WARP[1]
OTEXT = WARP[2]
VALTP = "valueType"
GENERATED = set(
"""
writtenBy
dateWritten
version
""".strip().split()
)
NODE = "node"
NODES = "nodes"
EDGE = "edge"
EDGES = "edges"
NFS = "nodeFeatures"
EFS = "edgeFeatures"
ADD_F_KEYS = {NFS, EFS}
NF = "nodeFrom"
NT = "nodeTo"
NS = "nodeSlots"
ADD_T_KEYS = {NF, NT, NS, NFS, EFS}
SE_TP = "sectionTypes"
SE_FT = "sectionFeatures"
ST_TP = "structureTypes"
ST_FT = "structureFeatures"
TM = Timestamp()
indent = TM.indent
info = TM.info
error = TM.error
isSilent = TM.isSilent
setSilent = TM.setSilent
def _itemize(arg):
return [] if not arg else itemize(arg) if type(arg) is str else list(arg)
def _rep(iterable):
return ", ".join(sorted(iterable))
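# modify() below reads a text-fabric dataset from `location`, applies the
# requested feature merges/deletions/additions and node-type
# merges/deletions/additions (in that order), and writes the result to
# `targetLocation`. The requests are validated up front by prepare(), and the
# first failing step aborts the whole run.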
def modify(
location,
targetLocation,
mergeFeatures=None,
deleteFeatures=None,
addFeatures=None,
mergeTypes=None,
deleteTypes=None,
addTypes=None,
featureMeta=None,
silent=False,
):
addFeatures = addFeatures or {}
deleteFeatures = set(_itemize(deleteFeatures))
mergeFeatures = mergeFeatures or {}
addTypes = addTypes or {}
deleteTypes = set(_itemize(deleteTypes))
mergeTypes = mergeTypes or {}
featureMeta = featureMeta or {}
origMaxNode = None
origNodeTypes = None
origNodeFeatures = None
origEdgeFeatures = None
origFeatures = None
shift = {}
shiftNeeded = False
slotType = None
maxNode = None
nodeFeatures = {}
edgeFeatures = {}
deletedTypes = set()
deletedFeatures = set()
nodeTypes = {}
nodeFeaturesOut = {}
edgeFeaturesOut = {}
metaDataOut = {}
api = None
good = True
ePrefix = ""
eItem = ""
def err(msg):
nonlocal good
error(f"{ePrefix}{eItem}{msg}", tm=False)
good = False
def inf(msg):
info(f"{ePrefix}{eItem}{msg}", tm=False)
def meta(feat):
return api.TF.features[feat].metaData
def valTp(feat):
return meta(feat).get(VALTP, None)
def otextInfo():
orig = meta(OTEXT)
custom = featureMeta.get(OTEXT, {})
combi = {}
for key in set(custom) | set(orig):
origVal = orig.get(key, "")
customVal = custom.get(key, "")
combi[key] = customVal or origVal
ensureTypes = set()
ensureFeatures = set()
for kind in (SE_TP, ST_TP):
ensureTypes |= set(itemize(combi.get(kind, ""), sep=","))
for kind in (SE_FT, ST_FT):
ensureFeatures |= set(itemize(combi.get(kind, ""), sep=","))
ensureFeatures |= set(collectFormats(combi)[1])
return (ensureTypes, ensureFeatures)
def allInt(values):
return all(isInt(v) for v in values)
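    # prepare() loads the source dataset and checks every requested merge,
    # delete and add against the existing features and node types before any
    # data is touched; problems are reported via err() and make it return
    # False. It also refuses to run when the target directory is not empty.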
def prepare():
nonlocal api
nonlocal origNodeTypes
nonlocal origFeatures
nonlocal origNodeFeatures
nonlocal origEdgeFeatures
nonlocal origMaxNode
nonlocal maxNode
nonlocal shift
nonlocal ePrefix
nonlocal eItem
indent(level=0, reset=True)
info("preparing and checking ...")
indent(level=1, reset=True)
TF = Fabric(locations=location, silent=silent)
origAllFeatures = TF.explore(silent=silent or True, show=True)
origNodeFeatures = set(origAllFeatures[NODES])
origEdgeFeatures = set(origAllFeatures[EDGES])
origFeatures = origNodeFeatures | origEdgeFeatures
api = TF.load("", silent=silent)
if not api:
return False
F = api.F
C = api.C
origNodeTypes = {x[0]: (x[2], x[3]) for x in C.levels.data}
origMaxSlot = F.otype.maxSlot
origMaxNode = F.otype.maxNode
maxNode = origMaxNode
addedTp = set()
addedFt = set()
deletedTp = set()
deletedFt = set()
# check mergeFeatures
ePrefix = "Merge features: "
for (outFeat, inFeats) in mergeFeatures.items():
eItem = f"{outFeat}: "
inFeats = _itemize(inFeats)
if outFeat in WARP:
err(f"Can not merge into standard features")
continue
if not inFeats:
err("Nothing to merge from")
continue
addedFt.add(outFeat)
for inFeat in inFeats:
if inFeat in WARP:
err(f"Can not merge from standard features: {inFeat}")
continue
deletedFt.add(inFeat)
missingIn = set(f for f in inFeats if f not in origFeatures)
if missingIn:
err(f"Missing features {_rep(missingIn)}")
allInIsNode = all(f in origNodeFeatures for f in inFeats)
allInIsEdge = all(f in origEdgeFeatures for f in inFeats)
outExists = outFeat in origFeatures
outIsNode = outExists and outFeat in origNodeFeatures
outIsEdge = outExists and outFeat in origEdgeFeatures
if outIsNode and not allInIsNode:
err(f"Node Feature can not be merged from an edge feature")
if outIsEdge and not allInIsEdge:
err(f"Edge Feature can not be merged from a node feature")
if not allInIsNode and not allInIsEdge:
err(f"Feature can not be merged from both node and edge features")
allInIsInt = all(valTp(f) == "int" for f in inFeats)
correctTp = "int" if allInIsInt else "str"
checkValType(outFeat, correctTp=correctTp)
# check deleteFeatures
ePrefix = "Delete features: "
for feat in deleteFeatures:
eItem = f"{feat}: "
if feat in WARP:
err(f"Can not delete standard features")
continue
if feat not in origFeatures:
err(f"Not in data set")
deletedFt.add(feat)
# check addFeatures
ePrefix = "Add features: "
eItem = ""
illegalKeys = set(addFeatures) - ADD_F_KEYS
if illegalKeys:
err(f"{_rep(illegalKeys)} unrecognized, expected {_rep(ADD_F_KEYS)}")
bothFeatures = set(addFeatures.get(NFS, {})) & set(addFeatures.get(EFS, {}))
if bothFeatures:
err(f"{_rep(bothFeatures)}: Both node and edge features")
for (kind, otherKind, origSet, origSetOther) in (
(NODE, EDGE, origNodeFeatures, origEdgeFeatures),
(EDGE, NODE, origEdgeFeatures, origNodeFeatures),
):
for (feat, data) in addFeatures.get(f"{kind}Features", {}).items():
eItem = f"{feat}: "
if feat in WARP:
err(f"Cannot add standard features")
continue
if feat in origSetOther:
err(f"{kind} feature already exists as {otherKind} feature")
checkValType(feat, vals=data.values())
addedFt.add(feat)
# check mergeTypes
ePrefix = "Merge types: "
mData = {}
for (outType, inTypes) in mergeTypes.items():
eItem = f"{outType}: "
if outType == slotType:
err(f"Result cannot be the slot type")
withFeatures = type(inTypes) is dict
addedTp.add(outType)
for inType in inTypes:
if inType == slotType:
err(f"Slot type {inType} is not mergeable")
continue
if inType not in origNodeTypes:
err(f"Cannot merge non-existing node type {inType}")
continue
deletedTp.add(inType)
mFeatures = inTypes[inType] if withFeatures else {}
for (feat, val) in mFeatures.items():
mData.setdefault(feat, set()).add(val)
addedFt.add(feat)
for (feat, vals) in mData.items():
eItem = f"{feat}: "
checkValType(feat, vals=vals)
# check deleteTypes
ePrefix = "Delete types: "
for nodeType in deleteTypes:
eItem = f"{nodeType}: "
if nodeType not in origNodeTypes:
err(f"Not in data set")
continue
deletedTp.add(nodeType)
# check addTypes
ePrefix = "Add types: "
for (nodeType, typeInfo) in sorted(addTypes.items()):
eItem = f"{nodeType}: "
illegalKeys = set(typeInfo) - ADD_T_KEYS
if illegalKeys:
err(f"{_rep(illegalKeys)} unrecognized, expected {_rep(ADD_T_KEYS)}")
continue
if nodeType in origNodeTypes:
err(f"Already occurs")
continue
addedTp.add(nodeType)
nodeSlots = typeInfo.get(NS, {})
if not nodeSlots:
err(f"No slot information given")
nF = typeInfo.get(NF, None)
if not nF:
err(f"No lower bound given")
nT = typeInfo.get(NT, None)
if not nT:
err(f"No upper bound given")
if nF is not None and nT is not None:
unlinked = 0
badlinked = 0
for n in range(nF, nT + 1):
slots = nodeSlots.get(n, ())
if not slots:
unlinked += 1
else:
slotGood = True
for slot in slots:
if slot < 1 or slot > origMaxSlot:
slotGood = False
if not slotGood:
badlinked += 1
if unlinked:
err(f"{unlinked} nodes not linked to slots")
if badlinked:
err(f"{badlinked} nodes linked to non-slot nodes")
for kind in (NODE, EDGE):
for (feat, data) in typeInfo.get(f"{kind}Features", {}).items():
eItem = f"{feat}: "
checkValType(feat, vals=data.values())
addedFt.add(feat)
(otextTypes, otextFeatures) = otextInfo()
problemTypes = addedTp & deletedTp
if problemTypes:
ePrefix = "Add and then delete: "
eItem = "types: "
err(f"{_rep(problemTypes)}")
problemTypes = otextTypes - ((set(origNodeTypes) | addedTp) - deletedTp)
if problemTypes:
ePrefix = "Missing for text API: "
eItem = "types: "
err(f"{_rep(problemTypes)}")
problemFeats = addedFt & deletedFt
if problemFeats:
ePrefix = "Add and then delete: "
eItem = "features: "
err(f"{_rep(problemFeats)}")
problemFeats = otextFeatures - ((origFeatures | addedFt) - deletedFt)
if problemFeats:
ePrefix = "Missing for text API: "
eItem = "features: "
err(f"{_rep(problemFeats)}")
if not dirEmpty(targetLocation):
ePrefix = "Output directory: "
eItem = "not empty: "
err(f"Clean it or remove it or choose another location")
if not good:
return False
api = TF.loadAll()
info("done")
return True
def checkValType(feat, vals=None, correctTp=None):
origTp = valTp(feat) if feat in origFeatures else None
customTp = featureMeta.get(feat, {}).get(VALTP, None)
assignedTp = origTp or customTp
if correctTp is None:
correctTp = "int" if allInt(vals) else "str"
newTp = customTp or correctTp
if newTp != assignedTp:
featureMeta.setdefault(feat, {})[VALTP] = newTp
if customTp and customTp != correctTp and customTp == "int":
err(f"feature values are declared to be int but some values are not int")
if assignedTp != newTp:
rep1 = f"feature of type {newTp}"
rep2 = f" (was {assignedTp})" if assignedTp else ""
inf(f"{rep1}{rep2}")
def shiftx(vs, offset=None, nF=None, nT=None):
if offset is None:
return (
                {shift[m]: v for (m, v) in vs.items() if m in shift}
                if type(vs) is dict
                else {shift[m] for m in vs if m in shift}
)
else:
return (
{m + offset: v for (m, v) in vs.items() if nF <= m <= nT}
if type(vs) is dict
else {m + offset for m in vs if nF <= m <= nT}
)
def shiftFeature(kind, feat, data):
return (
{shift[n]: v for (n, v) in data.items() if n in shift}
if kind == NODE
else {shift[n]: shiftx(v) for (n, v) in data.items() if n in shift}
)
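    # mergeF() copies the values of each input feature into its target
    # feature (later inputs overwrite earlier ones for the same node) and
    # marks the input features as deleted.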
def mergeF():
nonlocal deletedFeatures
Fs = api.Fs
Es = api.Es
indent(level=0)
if mergeFeatures:
info("merge features ...")
indent(level=1, reset=True)
inF = set()
for (outFeat, inFeats) in mergeFeatures.items():
data = {}
inFeats = _itemize(inFeats)
if all(f in origNodeFeatures for f in inFeats):
featSrc = Fs
featDst = nodeFeatures
else:
featSrc = Es
featDst = edgeFeatures
for inFeat in inFeats:
for (n, val) in featSrc(inFeat).data.items():
data[n] = val
featDst.setdefault(outFeat, {}).update(data)
for inFeat in inFeats:
inF.add(inFeat)
if inFeat in featDst:
del featDst[inFeat]
deletedFeatures |= inF
if mergeFeatures:
info(f"done (deleted {len(inF)} and added {len(mergeFeatures)} features)")
indent(level=2)
info(f"deleted {_rep(inF)}", tm=False)
info(f"added {_rep(mergeFeatures)}", tm=False)
return True
def deleteF():
indent(level=0)
if deleteFeatures:
info("delete features ...")
indent(level=1, reset=True)
for feat in deleteFeatures:
dest = (
nodeFeatures
if feat in origNodeFeatures
else edgeFeatures
if feat in origEdgeFeatures
else None
)
if dest and feat in dest:
del dest[feat]
deletedFeatures.add(feat)
if deleteFeatures:
info(f"done ({len(deleteFeatures)} features)")
indent(level=2)
info(_rep(deleteFeatures), tm=False)
return True
def addF():
indent(level=0)
if addFeatures:
info("add features ...")
indent(level=1, reset=True)
added = collections.defaultdict(set)
for (kind, dest) in (
(NODE, nodeFeatures),
(EDGE, edgeFeatures),
):
for (feat, data) in addFeatures.get(f"{kind}Features", {}).items():
dest.setdefault(feat, {}).update(data)
added[kind].add(feat)
if addFeatures:
info(
f'done (added {len(added["node"])} node + {len(added["edge"])} edge features)'
)
indent(level=2)
for (kind, feats) in sorted(added.items()):
info(f"{kind} features: {_rep(feats)}")
return True
def mergeT():
nonlocal deletedTypes
indent(level=0)
if mergeTypes:
info("merge types ...")
indent(level=1, reset=True)
inT = set()
for (outType, inTypes) in mergeTypes.items():
info(f"Merging {outType}")
withFeatures = type(inTypes) is dict
for inType in inTypes:
                mFeatures = inTypes[inType] if withFeatures else {}
                mFeatures[OTYPE] = outType
                (nF, nT) = origNodeTypes[inType]
                for (feat, val) in mFeatures.items():
for n in range(nF, nT + 1):
nodeFeatures.setdefault(feat, {})[n] = val
inT.add(inType)
deletedTypes |= inT
if mergeTypes:
info(f"done (merged {len(mergeTypes)} node types)")
indent(level=2)
info(f"deleted {_rep(inT)}", tm=False)
info(f"added {_rep(mergeTypes)}", tm=False)
return True
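    # deleteT() drops whole node types and renumbers the remaining nodes so
    # the numbering stays gapless; `shift` maps old node numbers to new ones
    # and is also applied to the feature updates accumulated so far.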
def deleteT():
nonlocal maxNode
nonlocal shiftNeeded
indent(level=0)
if deleteTypes:
info("delete types ...")
indent(level=1, reset=True)
curShift = 0
for (nType, (nF, nT)) in sorted(origNodeTypes.items(), key=lambda x: x[1][0],):
if nType in deleteTypes:
curShift -= nT - nF + 1
deletedTypes.add(nType)
else:
nodeTypes[nType] = (nF + curShift, nT + curShift)
for n in range(nF, nT + 1):
                    shift[n] = n + curShift
for (kind, upd) in (
(NODE, nodeFeatures,),
(EDGE, edgeFeatures,),
):
for (feat, uData) in upd.items():
upd[feat] = shiftFeature(kind, feat, uData)
        maxNode = origMaxNode + curShift
shiftNeeded = curShift != 0
if deleteTypes:
info(f"done ({len(deleteTypes)} types)")
indent(level=2)
info(_rep(deleteTypes), tm=False)
return True
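    # addT() appends each new node type after the current maximum node
    # number, filling otype/oslots for the new nodes and offsetting node
    # references in the supplied edge-feature values into the new range.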
def addT():
nonlocal maxNode
indent(level=0)
if addTypes:
info("add types ...")
indent(level=1, reset=True)
for (nodeType, typeInfo) in sorted(addTypes.items()):
nF = typeInfo[NF]
nT = typeInfo[NT]
offset = maxNode - nF + 1
nodeSlots = typeInfo[NS]
data = {}
for n in range(nF, nT + 1):
data[offset + n] = nodeType
nodeFeatures.setdefault(OTYPE, {}).update(data)
data = {}
for n in range(nF, nT + 1):
data[offset + n] = set(nodeSlots[n])
edgeFeatures.setdefault(OSLOTS, {}).update(data)
for (feat, addData) in typeInfo.get(NFS, {}).items():
data = {}
for n in range(nF, nT + 1):
value = addData.get(n, None)
if value is not None:
data[offset + n] = value
nodeFeatures.setdefault(feat, {}).update(data)
for (feat, addData) in typeInfo.get(EFS, {}).items():
data = {}
for n in range(nF, nT + 1):
value = addData.get(n, None)
if value:
newValue = shiftx(value, offset=offset, nF=nF, nT=nT)
if newValue:
data[offset + n] = newValue
edgeFeatures.setdefault(feat, {}).update(data)
maxNode += nT - nF + 1
if addTypes:
info(f"done ({len(addTypes)} types)")
indent(level=2)
info(_rep(addTypes), tm=False)
return True
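    # applyUpdates() merges the original feature data with the accumulated
    # updates, applies node renumbering where needed, and folds featureMeta
    # overrides into the metadata (a value of None removes a metadata key).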
def applyUpdates():
Fs = api.Fs
Es = api.Es
indent(level=0)
info("applying updates ...")
indent(level=1, reset=True)
mFeat = 0
for (kind, featSet, featSrc, featUpd, featOut) in (
(NODE, origNodeFeatures, Fs, nodeFeatures, nodeFeaturesOut),
(EDGE, origEdgeFeatures, Es, edgeFeatures, edgeFeaturesOut),
):
for feat in (set(featSet) | set(featUpd)) - deletedFeatures:
outData = {}
outMeta = {}
if feat in featSet:
featObj = featSrc(feat)
outMeta.update(featObj.meta)
if shiftNeeded:
outData.update(shiftFeature(kind, feat, featObj))
mFeat += 1
else:
outData.update(featObj.items())
if feat in featUpd:
outData.update(featUpd[feat])
if kind == EDGE:
aVal = next(iter(featUpd[feat].values()))
hasValues = type(aVal) is dict
if outMeta.get("edgeValues", False) != hasValues:
outMeta["edgeValues"] = hasValues
if feat in featureMeta:
for (k, v) in featureMeta[feat].items():
if v is None:
if k in outMeta:
del outMeta[k]
else:
outMeta[k] = v
featOut[feat] = outData
metaDataOut[feat] = outMeta
otextMeta = {}
otextMeta.update(meta(OTEXT))
mK = 0
if OTEXT in featureMeta:
for (k, v) in featureMeta[OTEXT].items():
if v is None:
if k in otextMeta:
del otextMeta[k]
mK += 1
else:
if k not in otextMeta or otextMeta[k] != v:
otextMeta[k] = v
mK += 1
metaDataOut[OTEXT] = otextMeta
if mFeat or mK:
fRep = f" (shifted {mFeat} features)" if mFeat else ""
kRep = f" (adapted {mK} keys in otext)" if mK else ""
info(f"done{fRep}{kRep}")
return True
def writeTf():
indent(level=0)
info("write TF data ...")
indent(level=1, reset=True)
TF = Fabric(locations=targetLocation, silent=silent or True)
TF.save(
metaData=metaDataOut,
nodeFeatures=nodeFeaturesOut,
edgeFeatures=edgeFeaturesOut,
)
return True
def finalize():
indent(level=0)
info("all done")
return True
def process():
for step in (
prepare,
mergeF,
deleteF,
addF,
mergeT,
deleteT,
addT,
applyUpdates,
writeTf,
finalize,
):
if not step():
return False
return True
wasSilent = isSilent()
setSilent(silent)
result = process()
setSilent(wasSilent)
return result
| [
"[email protected]"
] | |
6076b1d83d498c7d5098c42a7e74abd9531c111f | 47082917dde1e8af42197dbc299e69e62297b34f | /config/urls.py | 6352f0ca3dac2706a3823b1f981270aedd27e0c3 | [] | no_license | navill/Dstagram-repository | 66520ed6fd5e8cf3fe633c04c7a1eaaae9bc57c0 | fb44acea8906fcac5724d4f9bdfdfe4e218f947a | refs/heads/master | 2020-06-10T20:59:28.581733 | 2019-06-25T16:31:58 | 2019-06-25T16:31:58 | 193,745,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static  # serve specific resources as static responses
from django.conf import settings  # provides access to Django's settings values
urlpatterns = [
path('site_config/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('accounts/', include('accounts.urls')),
path('', include('photo.urls'))
]
# Add the urlpattern below so images can be served
# -> not used when deployed/live
# -> because this is not something Django itself should handle;
# -> a web server should serve it (Heroku does not support this),
# -> or a separate file server should be set up
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
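# Note: user-uploaded files are normally served the same way from MEDIA_URL /
# MEDIA_ROOT. A sketch (only relevant if uploaded photos live under MEDIA_ROOT):
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)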
#
# if settings.DEBUG:
# import debug_toolbar
# urlpatterns = [
# path('__debug__/', include(debug_toolbar.urls)),
# ] + urlpatterns | [
"[email protected]"
] | |
1f4f62246a43fa178cc25b4a6b8a2617bdb8d082 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/Non-Redundant-Nitro-Grow/data_ha.py | 3ef502d474e92c96999d5c63a96777977159f176 | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,434 | py | from data_common import *
CONFIG = 'HA'
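# JSON-patch style payload for switching the downlink speed setting to 25Gb.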
update_downlink_speed = \
[
{
"op": "replace",
"path": "/downlinkSpeedMode",
"value": "SPEED_25GB"
}
]
uplink_set_1 = {
'name': 'US1',
'ethernetNetworkType': 'Tagged',
'networkType': 'Ethernet',
'lacpTimer': 'Short',
'mode': 'Auto',
'nativeNetworkUri': None,
'logicalPortConfigInfos': [
{'enclosure': '1', 'bay': '2', 'port': 'Q1', 'speed': 'Auto'},
]
}
uplink_set_2 = {
'name': 'US2',
'ethernetNetworkType': 'Tagged',
'networkType': 'Ethernet',
'lacpTimer': 'Short',
'mode': 'Auto',
'networkUris': ['net_420', 'net_421', 'net_422'],
'nativeNetworkUri': None,
'logicalPortConfigInfos': [
{'enclosure': '2', 'bay': '5', 'port': 'Q1', 'speed': 'Auto'}
]
}
add_uplinkset = {
'name': 'add_uplinkset',
'type': 'uplink-setV5',
'ethernetNetworkType': 'Tagged',
'networkType': 'Ethernet',
'networkUris': ['net_425', 'net_426'],
'manualLoginRedistributionState': 'NotSupported',
'connectionMode': 'Auto',
'portConfigInfos': [
{ 'desiredSpeed': 'Auto',
'location': {
'locationEntries': [
{
'value': 'Q6',
'type': 'Port'
},
{
'value': '2',
'type': 'Bay'
},
{
'value': ENC_1,
'type': 'Enclosure'
}
]
}
}
],
}
edit_uplinkset = {
'name': 'US1',
'type': 'uplink-setV5',
'ethernetNetworkType': 'Tagged',
'networkType': 'Ethernet',
'manualLoginRedistributionState': 'NotSupported',
'lacpTimer': 'Long',
'connectionMode': 'Auto',
'portConfigInfos': [
{
'desiredSpeed': 'Auto',
'location': {
'locationEntries': [
{
'value': 'Q1',
'type': 'Port'
},
{
'value': '2',
'type': 'Bay'
},
{
'value': ENC_1,
'type': 'Enclosure'
}
]
}
}
],
}
###
# Interconnect bays configurations
# 2 Enclosures, Fabric 2
###
Enc2AMap = \
[
{'bay': 2, 'enclosure': 1, 'type': 'Virtual Connect SE 100Gb F32 Module for Synergy', 'enclosureIndex': 1},
{'bay': 2, 'enclosure': 2, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 2}
]
Enc2BMap = \
[
{'bay': 5, 'enclosure': 1, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 1},
{'bay': 5, 'enclosure': 2, 'type': 'Virtual Connect SE 100Gb F32 Module for Synergy', 'enclosureIndex': 2}
]
###
###
# Interconnect bays configurations
# 3 Enclosures, Fabric 2
###
Enc3AMap = Enc2AMap + \
[
{'bay': 2, 'enclosure': 3, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 3}
]
Enc3BMap = Enc2BMap + \
[
{'bay': 5, 'enclosure': 3, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 3}
]
###
# Interconnect bays configurations
# 4 Enclosures, Fabric 3
###
Enc4AMap = Enc3AMap + \
[
{'bay': 2, 'enclosure': 4, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 4}
]
Enc4BMap = Enc3BMap + \
[
{'bay': 5, 'enclosure': 4, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 4}
]
###
# Interconnect bays configurations
# 5 Enclosures, Fabric 3
###
Enc5AMap = Enc4AMap + \
[
{'bay': 2, 'enclosure': 5, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 5}
]
Enc5BMap = Enc4BMap + \
[
{'bay': 5, 'enclosure': 5, 'type': 'Synergy 50Gb Interconnect Link Module', 'enclosureIndex': 5}
]
###
# Logical Interconnect Groups
###
ligs = {
'Enc2A-LIG': {
'name': 'Enc2A-LIG',
'interconnectMapTemplate': Enc2AMap,
'enclosureIndexes': [1, 2],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantASide',
'uplinkSets': [uplink_set_1],
},
'Enc2B-LIG': {
'name': 'Enc2B-LIG',
'interconnectMapTemplate': Enc2BMap,
'enclosureIndexes': [1, 2],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantBSide',
'uplinkSets': [uplink_set_2],
},
'Enc3A-LIG': {
'name': 'Enc3A-LIG',
'interconnectMapTemplate': Enc3AMap,
'enclosureIndexes': [1, 2, 3],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantASide',
'uplinkSets': [uplink_set_1],
},
'Enc3B-LIG': {
'name': 'Enc3B-LIG',
'interconnectMapTemplate': Enc3BMap,
'enclosureIndexes': [1, 2, 3],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantBSide',
'uplinkSets': [uplink_set_2],
},
'Enc4A-LIG': {
'name': 'Enc4A-LIG',
'interconnectMapTemplate': Enc4AMap,
'enclosureIndexes': [1, 2, 3, 4],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantASide',
'uplinkSets': [uplink_set_1],
},
'Enc4B-LIG': {
'name': 'Enc4B-LIG',
'interconnectMapTemplate': Enc4BMap,
'enclosureIndexes': [1, 2, 3, 4],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantBSide',
'uplinkSets': [uplink_set_2],
},
'Enc5A-LIG': {
'name': 'Enc5A-LIG',
'interconnectMapTemplate': Enc5AMap,
'enclosureIndexes': [1, 2, 3, 4, 5],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantASide',
'uplinkSets': [uplink_set_1],
},
'Enc5B-LIG': {
'name': 'Enc5B-LIG',
'interconnectMapTemplate': Enc5BMap,
'enclosureIndexes': [1, 2, 3, 4, 5],
'interconnectBaySet': 2,
'redundancyType': 'NonRedundantBSide',
'uplinkSets': [uplink_set_2],
},
}
###
# Enclosure Groups
###
enc_group = {
'Enc2-EG':
{'name': 'Enc2-EG',
'enclosureCount': 2,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': 'LIG:Enc2A-LIG'},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': 'LIG:Enc2B-LIG'},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': None}],
},
'Enc3-EG':
{'name': 'Enc3-EG',
'enclosureCount': 3,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': 'LIG:Enc3A-LIG'},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': 'LIG:Enc3B-LIG'},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': None}],
},
'Enc4-EG':
{'name': 'Enc4-EG',
'enclosureCount': 4,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': 'LIG:Enc4A-LIG'},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': 'LIG:Enc4B-LIG'},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': None}],
},
'Enc5-EG':
{'name': 'Enc5-EG',
'enclosureCount': 5,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': 'LIG:Enc5A-LIG'},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': 'LIG:Enc5B-LIG'},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': None}],
}
}
###
# Server profiles
###
profiles = {
'Profile1': {
'payload': {
'name': 'Profile1',
'serverHardwareUri': ENC_1 + ', bay 2',
'enclosureUri': ENC_1,
'connectionSettings': {
'connections': [
{ 'name': 'conn',
'functionType': 'Ethernet',
'portId': 'Auto',
'networkUri': 'RNS',
},
]
}
},
'IP': '10.11.0.255',
},
'Profile2': {
'payload': {
'name': 'Profile2',
'serverHardwareUri': ENC_2 + ', bay 2',
'enclosureUri': ENC_2,
'connectionSettings': {
'connections': [
{ 'name': 'conn',
'functionType': 'Ethernet',
'portId': 'Auto',
'networkUri': 'RNS',
}
]
}
},
'IP': '10.12.0.255',
},
'Profile3': {
'payload': {
'name': 'Profile3',
'serverHardwareUri': ENC_3 + ', bay 2',
'enclosureUri': ENC_3,
'connectionSettings': {
'connections': [
{ 'name': 'conn',
'functionType': 'Ethernet',
'portId': 'Auto',
'networkUri': 'RNS',
}
]
}
},
'IP': '10.13.0.255',
},
'Profile4': {
'payload': {
'name': 'Profile4',
'serverHardwareUri': ENC_4 + ', bay 2',
'enclosureUri': ENC_4,
'connectionSettings': {
'connections': [
{ 'name': 'conn',
'functionType': 'Ethernet',
'portId': 'Auto',
'networkUri': 'net_404',
}
]
}
},
'IP': '10.14.0.255',
},
'Profile5': {
'payload': {
'name': 'Profile5',
'serverHardwareUri': ENC_5 + ', bay 2',
'enclosureUri': ENC_5,
'connectionSettings': {
'connections': [
{ 'name': 'conn',
'functionType': 'Ethernet',
'portId': 'Auto',
'networkUri': 'net_405',
}
]
}
},
'IP': '10.15.0.255',
}
}
| [
"[email protected]"
] | |
a4e1afe224daa8082b4cdfd04c6122d18cf4637c | 8cc862aa51d3fec95d094dc4bd3151e1155d240a | /pythonProject/imports/using_sys.py | 4c609d014e0a48228c4d6084ff8263843af572e3 | [] | no_license | activehuahua/python | bcbf3a2190025e2315399bfd0c725f598211632b | cc36a93c01c53f856426ccf2724848142524d9c0 | refs/heads/master | 2023-04-14T10:23:21.590765 | 2019-08-12T06:52:15 | 2019-08-12T06:52:15 | 160,277,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # -*- coding: utf-8 -*-
'''
@Author : zhaojianghua
@File : using_sys.py
@Time : 2018/12/5 9:54
'''
import sys
print('命令行参数如下:')
for i in sys.argv:
print(i)
print('\n\nPython 路径为:', sys.path, '\n') | [
"[email protected]"
] | |
dba0a06a82fbbfd3e411a7aa0a4f2a0711b37607 | 423ca5205aaf0b2d3bfff9affe2172fec21bfad0 | /web/pgadmin/browser/server_groups/servers/databases/schemas/domains/domain_constraints/tests/test_domain_constraints_add.py | b7f603fdfd4c1adca8a359d0d07247caee18a598 | [
"PostgreSQL"
] | permissive | adityatoshniwal/pgadmin4 | 25cc665d1438f82bdb17f13270933c43e3a98f4b | 2aea5b41ad8b6bd4a408a87a6743fcbfc88ed329 | refs/heads/master | 2023-09-03T20:04:15.941551 | 2023-07-31T09:32:30 | 2023-07-31T09:32:30 | 419,212,569 | 0 | 0 | NOASSERTION | 2023-01-02T05:37:03 | 2021-10-20T06:34:38 | Python | UTF-8 | Python | false | false | 4,748 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as domain_cons_utils
from unittest.mock import patch
class DomainConstraintAddTestCase(BaseTestGenerator):
""" This class will add new domain constraint under schema node. """
scenarios = utils.generate_scenarios('domain_constraint_create',
domain_cons_utils.test_cases)
def setUp(self):
super().setUp()
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
self.domain_name = "domain_%s" % (str(uuid.uuid4())[1:8])
self.domain_info = domain_cons_utils.create_domain(self.server,
self.db_name,
self.schema_name,
self.schema_id,
self.domain_name)
def create_domain_constraint(self):
"""
This function create a domain constraint and returns it
:return: created domain constraint response
"""
return self.tester.post(self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) +
'/' + str(self.schema_id) + '/' +
str(self.domain_id) + '/',
data=json.dumps(self.test_data),
content_type='html/json',
follow_redirects=True)
def runTest(self):
""" This function will add domain constraint under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
self.test_data['name'] =\
"test_domain_con_add_%s" % (str(uuid.uuid4())[1:8])
self.domain_id = self.domain_info[0]
if self.is_positive_test:
response = self.create_domain_constraint()
else:
if hasattr(self, "internal_server_error"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_in_db"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_getting_coid"):
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = self.create_domain_constraint()
if hasattr(self, "error_domain_id"):
self.domain_id = 99999
response = self.create_domain_constraint()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
| [
"[email protected]"
] | |
047ddbf79cd824eea356100f84caef3d7a7612d3 | e4a9a67f1d79b3430aa43ebdb905a08717ee118a | /COT/helpers/tests/test_vmdktool.py | 58e36f57ba914bde6ee0b591ad71cdbf5d7e9a20 | [
"MIT"
] | permissive | digideskio/cot | 30c724c5b76abd5187a9c1e3c6f15a462b324da8 | 8fc84c8c72a9acb4adffca859154055f2857b53f | refs/heads/master | 2021-01-12T20:50:05.208963 | 2016-05-11T15:31:29 | 2016-05-11T15:31:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,826 | py | #!/usr/bin/env python
#
# test_vmdktool.py - Unit test cases for COT.helpers.vmdktool submodule.
#
# March 2015, Glenn F. Matthews
# Copyright (c) 2014-2015 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.helpers.vmdktool submodule."""
import mock
import os
from distutils.version import StrictVersion
from .test_helper import HelperUT
from COT.helpers.helper import Helper
from COT.helpers.vmdktool import VmdkTool
class TestVmdkTool(HelperUT):
"""Test cases for VmdkTool helper class."""
def setUp(self):
"""Test case setup function called automatically prior to each test."""
self.helper = VmdkTool()
super(TestVmdkTool, self).setUp()
def test_get_version(self):
"""Test .version getter logic."""
self.fake_output = "vmdktool version 1.4"
self.assertEqual(StrictVersion("1.4"), self.helper.version)
def test_install_helper_already_present(self):
"""Do nothing instead of re-installing."""
self.helper.install_helper()
self.assertEqual([], self.last_argv)
self.assertLogged(**self.ALREADY_INSTALLED)
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def test_install_helper_apt_get(self,
mock_makedirs,
mock_exists,
mock_isdir):
"""Test installation via 'apt-get'."""
mock_isdir.return_value = False
mock_exists.return_value = False
mock_makedirs.side_effect = OSError
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = True
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = False
Helper._apt_updated = False
self.fake_output = 'is not installed and no information is available'
self.system = 'Linux'
os.environ['PREFIX'] = '/usr/local'
if 'DESTDIR' in os.environ:
del os.environ['DESTDIR']
self.helper.install_helper()
self.assertEqual([
['dpkg', '-s', 'make'],
['sudo', 'apt-get', '-q', 'update'],
['sudo', 'apt-get', '-q', 'install', 'make'],
['dpkg', '-s', 'zlib1g-dev'],
['sudo', 'apt-get', '-q', 'install', 'zlib1g-dev'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/bin'],
['make', 'install', 'PREFIX=/usr/local'],
], self.last_argv)
self.assertTrue(Helper._apt_updated)
# Make sure we don't 'apt-get update/install' again unnecessarily
self.fake_output = 'install ok installed'
os.environ['PREFIX'] = '/opt/local'
os.environ['DESTDIR'] = '/home/cot'
self.last_argv = []
self.helper.install_helper()
self.assertEqual([
['dpkg', '-s', 'make'],
['dpkg', '-s', 'zlib1g-dev'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755',
'/home/cot/opt/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/home/cot/opt/local/bin'],
['make', 'install', 'PREFIX=/opt/local', 'DESTDIR=/home/cot'],
], self.last_argv)
def test_install_helper_port(self):
"""Test installation via 'port'."""
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['port'] = True
Helper._port_updated = False
self.helper.install_helper()
self.assertEqual([
['sudo', 'port', 'selfupdate'],
['sudo', 'port', 'install', 'vmdktool']
], self.last_argv)
self.assertTrue(Helper._port_updated)
# Make sure we don't 'port selfupdate' again unnecessarily
self.last_argv = []
self.helper.install_helper()
self.assertEqual([
['sudo', 'port', 'install', 'vmdktool']
], self.last_argv)
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def test_install_helper_yum(self,
mock_makedirs,
mock_exists,
mock_isdir):
"""Test installation via 'yum'."""
mock_isdir.return_value = False
mock_exists.return_value = False
mock_makedirs.side_effect = OSError
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = False
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = True
self.system = 'Linux'
os.environ['PREFIX'] = '/usr/local'
if 'DESTDIR' in os.environ:
del os.environ['DESTDIR']
self.helper.install_helper()
self.assertEqual([
['sudo', 'yum', '--quiet', 'install', 'make'],
['sudo', 'yum', '--quiet', 'install', 'zlib-devel'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/bin'],
['make', 'install', 'PREFIX=/usr/local'],
], self.last_argv)
def test_install_helper_unsupported(self):
"""Unable to install without a package manager."""
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = False
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = False
with self.assertRaises(NotImplementedError):
self.helper.install_helper()
def test_convert_unsupported(self):
"""Negative test - conversion to unsupported format/subformat."""
with self.assertRaises(NotImplementedError):
self.helper.convert_disk_image(self.blank_vmdk, self.temp_dir,
'qcow2')
with self.assertRaises(NotImplementedError):
self.helper.convert_disk_image(self.blank_vmdk, self.temp_dir,
'vmdk', 'monolithicSparse')
| [
"[email protected]"
] | |
6a3224e913eb781e8bbc458e3aa6542e3d77fee1 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/recommender/v1/recommender_v1_messages.py | eda70fa864fcf63f971b6e745b71d29eb49dc075 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 28,004 | py | """Generated message classes for recommender version v1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'recommender'
class GoogleCloudRecommenderV1CostProjection(_messages.Message):
r"""Contains metadata about how much money a recommendation can save or
incur.
Fields:
cost: An approximate projection on amount saved or amount incurred.
Negative cost units indicate cost savings and positive cost units
indicate increase. See google.type.Money documentation for
positive/negative units.
duration: Duration for which this cost applies.
"""
cost = _messages.MessageField('GoogleTypeMoney', 1)
duration = _messages.StringField(2)
class GoogleCloudRecommenderV1Impact(_messages.Message):
r"""Contains the impact a recommendation can have for a given category.
Enums:
CategoryValueValuesEnum: Category that is being targeted.
Fields:
category: Category that is being targeted.
costProjection: Use with CategoryType.COST
"""
class CategoryValueValuesEnum(_messages.Enum):
r"""Category that is being targeted.
Values:
CATEGORY_UNSPECIFIED: Default unspecified category. Don't use directly.
COST: Indicates a potential increase or decrease in cost.
SECURITY: Indicates a potential increase or decrease in security.
PERFORMANCE: Indicates a potential increase or decrease in performance.
MANAGEABILITY: Indicates a potential increase or decrease in
manageability.
"""
CATEGORY_UNSPECIFIED = 0
COST = 1
SECURITY = 2
PERFORMANCE = 3
MANAGEABILITY = 4
category = _messages.EnumField('CategoryValueValuesEnum', 1)
costProjection = _messages.MessageField('GoogleCloudRecommenderV1CostProjection', 2)
class GoogleCloudRecommenderV1ListRecommendationsResponse(_messages.Message):
r"""Response to the `ListRecommendations` method.
Fields:
nextPageToken: A token that can be used to request the next page of
results. This field is empty if there are no additional results.
recommendations: The set of recommendations for the `parent` resource.
"""
nextPageToken = _messages.StringField(1)
recommendations = _messages.MessageField('GoogleCloudRecommenderV1Recommendation', 2, repeated=True)
class GoogleCloudRecommenderV1MarkRecommendationClaimedRequest(_messages.Message):
r"""Request for the `MarkRecommendationClaimed` Method.
Messages:
StateMetadataValue: State properties to include with this state.
Overwrites any existing `state_metadata`. Keys must match the regex
/^a-z0-9{0,62}$/. Values must match the regex
/^[a-zA-Z0-9_./-]{0,255}$/.
Fields:
etag: Required. Fingerprint of the Recommendation. Provides optimistic
locking.
stateMetadata: State properties to include with this state. Overwrites any
existing `state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/.
Values must match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StateMetadataValue(_messages.Message):
r"""State properties to include with this state. Overwrites any existing
`state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/. Values must
match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
Messages:
AdditionalProperty: An additional property for a StateMetadataValue
object.
Fields:
additionalProperties: Additional properties of type StateMetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a StateMetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
etag = _messages.StringField(1)
stateMetadata = _messages.MessageField('StateMetadataValue', 2)
class GoogleCloudRecommenderV1MarkRecommendationFailedRequest(_messages.Message):
r"""Request for the `MarkRecommendationFailed` Method.
Messages:
StateMetadataValue: State properties to include with this state.
Overwrites any existing `state_metadata`. Keys must match the regex
/^a-z0-9{0,62}$/. Values must match the regex
/^[a-zA-Z0-9_./-]{0,255}$/.
Fields:
etag: Required. Fingerprint of the Recommendation. Provides optimistic
locking.
stateMetadata: State properties to include with this state. Overwrites any
existing `state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/.
Values must match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StateMetadataValue(_messages.Message):
r"""State properties to include with this state. Overwrites any existing
`state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/. Values must
match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
Messages:
AdditionalProperty: An additional property for a StateMetadataValue
object.
Fields:
additionalProperties: Additional properties of type StateMetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a StateMetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
etag = _messages.StringField(1)
stateMetadata = _messages.MessageField('StateMetadataValue', 2)
class GoogleCloudRecommenderV1MarkRecommendationSucceededRequest(_messages.Message):
r"""Request for the `MarkRecommendationSucceeded` Method.
Messages:
StateMetadataValue: State properties to include with this state.
Overwrites any existing `state_metadata`. Keys must match the regex
/^a-z0-9{0,62}$/. Values must match the regex
/^[a-zA-Z0-9_./-]{0,255}$/.
Fields:
etag: Required. Fingerprint of the Recommendation. Provides optimistic
locking.
stateMetadata: State properties to include with this state. Overwrites any
existing `state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/.
Values must match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StateMetadataValue(_messages.Message):
r"""State properties to include with this state. Overwrites any existing
`state_metadata`. Keys must match the regex /^a-z0-9{0,62}$/. Values must
match the regex /^[a-zA-Z0-9_./-]{0,255}$/.
Messages:
AdditionalProperty: An additional property for a StateMetadataValue
object.
Fields:
additionalProperties: Additional properties of type StateMetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a StateMetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
etag = _messages.StringField(1)
stateMetadata = _messages.MessageField('StateMetadataValue', 2)
class GoogleCloudRecommenderV1Operation(_messages.Message):
r"""Contains an operation for a resource loosely based on the JSON-PATCH
format with support for: * Custom filters for describing partial array
patch. * Extended path values for describing nested arrays. * Custom fields
for describing the resource for which the operation is being described. *
Allows extension to custom operations not natively supported by RFC6902. See
https://tools.ietf.org/html/rfc6902 for details on the original RFC.
Messages:
PathFiltersValue: Set of filters to apply if `path` refers to array
elements or nested array elements in order to narrow down to a single
unique element that is being tested/modified. This is intended to be an
exact match per filter. To perform advanced matching, use
path_value_matchers. * Example: { "/versions/*/name" : "it-123"
"/versions/*/targetSize/percent": 20 } * Example: {
"/bindings/*/role": "roles/admin" "/bindings/*/condition" : null } *
Example: { "/bindings/*/role": "roles/admin" "/bindings/*/members/*"
: ["[email protected]", "[email protected]"] } When both path_filters and
path_value_matchers are set, an implicit AND must be performed.
PathValueMatchersValue: Similar to path_filters, this contains set of
      filters to apply if `path` field refers to array elements. This is
meant to support value matching beyond exact match. To perform exact
match, use path_filters. When both path_filters and path_value_matchers
are set, an implicit AND must be performed.
Fields:
action: Type of this operation. Contains one of 'and', 'remove',
'replace', 'move', 'copy', 'test' and custom operations. This field is
case-insensitive and always populated.
path: Path to the target field being operated on. If the operation is at
the resource level, then path should be "/". This field is always
populated.
pathFilters: Set of filters to apply if `path` refers to array elements or
nested array elements in order to narrow down to a single unique element
that is being tested/modified. This is intended to be an exact match per
filter. To perform advanced matching, use path_value_matchers. *
Example: { "/versions/*/name" : "it-123"
"/versions/*/targetSize/percent": 20 } * Example: {
"/bindings/*/role": "roles/admin" "/bindings/*/condition" : null } *
Example: { "/bindings/*/role": "roles/admin" "/bindings/*/members/*"
: ["[email protected]", "[email protected]"] } When both path_filters and
path_value_matchers are set, an implicit AND must be performed.
pathValueMatchers: Similar to path_filters, this contains set of filters
      to apply if `path` field refers to array elements. This is meant to
support value matching beyond exact match. To perform exact match, use
path_filters. When both path_filters and path_value_matchers are set, an
implicit AND must be performed.
resource: Contains the fully qualified resource name. This field is always
populated. ex: //cloudresourcemanager.googleapis.com/projects/foo.
resourceType: Type of GCP resource being modified/tested. This field is
always populated. Example: cloudresourcemanager.googleapis.com/Project,
compute.googleapis.com/Instance
sourcePath: Can be set with action 'copy' or 'move' to indicate the source
field within resource or source_resource, ignored if provided for other
operation types.
sourceResource: Can be set with action 'copy' to copy resource
configuration across different resources of the same type. Example: A
resource clone can be done via action = 'copy', path = "/", from = "/",
source_resource = <source> and resource_name = <target>. This field is
empty for all other values of `action`.
value: Value for the `path` field. Will be set for
      actions: 'add'/'replace'. May be set for action: 'test'. Either this or
`value_matcher` will be set for 'test' operation. An exact match must be
performed.
valueMatcher: Can be set for action 'test' for advanced matching for the
value of 'path' field. Either this or `value` will be set for 'test'
operation.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PathFiltersValue(_messages.Message):
r"""Set of filters to apply if `path` refers to array elements or nested
array elements in order to narrow down to a single unique element that is
being tested/modified. This is intended to be an exact match per filter.
To perform advanced matching, use path_value_matchers. * Example: {
"/versions/*/name" : "it-123" "/versions/*/targetSize/percent": 20 } *
Example: { "/bindings/*/role": "roles/admin" "/bindings/*/condition" :
null } * Example: { "/bindings/*/role": "roles/admin"
"/bindings/*/members/*" : ["[email protected]", "[email protected]"] } When both
path_filters and path_value_matchers are set, an implicit AND must be
performed.
Messages:
AdditionalProperty: An additional property for a PathFiltersValue
object.
Fields:
additionalProperties: Additional properties of type PathFiltersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PathFiltersValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class PathValueMatchersValue(_messages.Message):
r"""Similar to path_filters, this contains set of filters to apply if
    `path` field refers to array elements. This is meant to support value
matching beyond exact match. To perform exact match, use path_filters.
When both path_filters and path_value_matchers are set, an implicit AND
must be performed.
Messages:
AdditionalProperty: An additional property for a PathValueMatchersValue
object.
Fields:
additionalProperties: Additional properties of type
PathValueMatchersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PathValueMatchersValue object.
Fields:
key: Name of the additional property.
value: A GoogleCloudRecommenderV1ValueMatcher attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('GoogleCloudRecommenderV1ValueMatcher', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
action = _messages.StringField(1)
path = _messages.StringField(2)
pathFilters = _messages.MessageField('PathFiltersValue', 3)
pathValueMatchers = _messages.MessageField('PathValueMatchersValue', 4)
resource = _messages.StringField(5)
resourceType = _messages.StringField(6)
sourcePath = _messages.StringField(7)
sourceResource = _messages.StringField(8)
value = _messages.MessageField('extra_types.JsonValue', 9)
valueMatcher = _messages.MessageField('GoogleCloudRecommenderV1ValueMatcher', 10)
class GoogleCloudRecommenderV1OperationGroup(_messages.Message):
r"""Group of operations that need to be performed atomically.
Fields:
operations: List of operations across one or more resources that belong to
this group. Loosely based on RFC6902 and should be performed in the
order they appear.
"""
operations = _messages.MessageField('GoogleCloudRecommenderV1Operation', 1, repeated=True)
class GoogleCloudRecommenderV1Recommendation(_messages.Message):
r"""A recommendation along with a suggested action. E.g., a rightsizing
recommendation for an underutilized VM, IAM role recommendations, etc
Fields:
additionalImpact: Optional set of additional impact that this
recommendation may have when trying to optimize for the primary
category. These may be positive or negative.
content: Content of the recommendation describing recommended changes to
resources.
description: Free-form human readable summary in English. The maximum
length is 500 characters.
etag: Fingerprint of the Recommendation. Provides optimistic locking when
updating states.
lastRefreshTime: Last time this recommendation was refreshed by the system
that created it in the first place.
name: Name of recommendation.
primaryImpact: The primary impact that this recommendation can have while
trying to optimize for one category.
recommenderSubtype: Contains an identifier for a subtype of
recommendations produced for the same recommender. Subtype is a function
of content and impact, meaning a new subtype might be added when
significant changes to `content` or `primary_impact.category` are
introduced. See the Recommenders section to see a list of subtypes for a
given Recommender. Examples: For recommender =
"google.iam.policy.Recommender", recommender_subtype can be one of
"REMOVE_ROLE"/"REPLACE_ROLE"
stateInfo: Information for state. Contains state and metadata.
"""
additionalImpact = _messages.MessageField('GoogleCloudRecommenderV1Impact', 1, repeated=True)
content = _messages.MessageField('GoogleCloudRecommenderV1RecommendationContent', 2)
description = _messages.StringField(3)
etag = _messages.StringField(4)
lastRefreshTime = _messages.StringField(5)
name = _messages.StringField(6)
primaryImpact = _messages.MessageField('GoogleCloudRecommenderV1Impact', 7)
recommenderSubtype = _messages.StringField(8)
stateInfo = _messages.MessageField('GoogleCloudRecommenderV1RecommendationStateInfo', 9)
class GoogleCloudRecommenderV1RecommendationContent(_messages.Message):
r"""Contains what resources are changing and how they are changing.
Fields:
operationGroups: Operations to one or more Google Cloud resources grouped
in such a way that, all operations within one group are expected to be
performed atomically and in an order.
"""
operationGroups = _messages.MessageField('GoogleCloudRecommenderV1OperationGroup', 1, repeated=True)
class GoogleCloudRecommenderV1RecommendationStateInfo(_messages.Message):
r"""Information for state. Contains state and metadata.
Enums:
StateValueValuesEnum: The state of the recommendation, Eg ACTIVE,
SUCCEEDED, FAILED.
Messages:
StateMetadataValue: A map of metadata for the state, provided by user or
automations systems.
Fields:
state: The state of the recommendation, Eg ACTIVE, SUCCEEDED, FAILED.
stateMetadata: A map of metadata for the state, provided by user or
automations systems.
"""
class StateValueValuesEnum(_messages.Enum):
r"""The state of the recommendation, Eg ACTIVE, SUCCEEDED, FAILED.
Values:
STATE_UNSPECIFIED: Default state. Don't use directly.
ACTIVE: Recommendation is active and can be applied. Recommendations
content can be updated by Google. ACTIVE recommendations can be
marked as CLAIMED, SUCCEEDED, or FAILED.
CLAIMED: Recommendation is in claimed state. Recommendations content is
immutable and cannot be updated by Google. CLAIMED recommendations
can be marked as CLAIMED, SUCCEEDED, or FAILED.
SUCCEEDED: Recommendation is in succeeded state. Recommendations content
is immutable and cannot be updated by Google. SUCCEEDED
recommendations can be marked as SUCCEEDED, or FAILED.
FAILED: Recommendation is in failed state. Recommendations content is
immutable and cannot be updated by Google. FAILED recommendations can
be marked as SUCCEEDED, or FAILED.
DISMISSED: Recommendation is in dismissed state. Recommendation content
can be updated by Google. DISMISSED recommendations can be marked as
ACTIVE.
"""
STATE_UNSPECIFIED = 0
ACTIVE = 1
CLAIMED = 2
SUCCEEDED = 3
FAILED = 4
DISMISSED = 5
@encoding.MapUnrecognizedFields('additionalProperties')
class StateMetadataValue(_messages.Message):
r"""A map of metadata for the state, provided by user or automations
systems.
Messages:
AdditionalProperty: An additional property for a StateMetadataValue
object.
Fields:
additionalProperties: Additional properties of type StateMetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a StateMetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
state = _messages.EnumField('StateValueValuesEnum', 1)
stateMetadata = _messages.MessageField('StateMetadataValue', 2)
class GoogleCloudRecommenderV1ValueMatcher(_messages.Message):
r"""Contains various matching options for values for a GCP resource field.
Fields:
matchesPattern: To be used for full regex matching. The regular expression
is using the Google RE2 syntax
(https://github.com/google/re2/wiki/Syntax), so to be used with
RE2::FullMatch
"""
matchesPattern = _messages.StringField(1)
class GoogleTypeMoney(_messages.Message):
r"""Represents an amount of money with its currency type.
Fields:
currencyCode: The 3-letter currency code defined in ISO 4217.
nanos: Number of nano (10^-9) units of the amount. The value must be
between -999,999,999 and +999,999,999 inclusive. If `units` is positive,
`nanos` must be positive or zero. If `units` is zero, `nanos` can be
positive, zero, or negative. If `units` is negative, `nanos` must be
negative or zero. For example $-1.75 is represented as `units`=-1 and
`nanos`=-750,000,000.
units: The whole units of the amount. For example if `currencyCode` is
`"USD"`, then 1 unit is one US dollar.
"""
currencyCode = _messages.StringField(1)
nanos = _messages.IntegerField(2, variant=_messages.Variant.INT32)
units = _messages.IntegerField(3)
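# Illustrative sketch only (not part of the generated API): the GoogleTypeMoney
# docstring above fixes the units/nanos convention, e.g. $-1.75 corresponds to
# units=-1 and nanos=-750,000,000. A small helper along these lines could build
# such a message from a decimal string; the helper name and the use of the
# `decimal` module are assumptions made here purely for illustration.
def _example_money_from_decimal(amount_str, currency_code):
  """Split a decimal amount such as '-1.75' into whole units and nano units."""
  import decimal
  amount = decimal.Decimal(amount_str)
  units = int(amount)  # truncates toward zero, e.g. -1 for '-1.75'
  nanos = int((amount - units) * 10**9)  # remaining fraction in 10^-9 units
  return GoogleTypeMoney(currencyCode=currency_code, units=units, nanos=nanos)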
class RecommenderProjectsLocationsRecommendersRecommendationsGetRequest(_messages.Message):
r"""A RecommenderProjectsLocationsRecommendersRecommendationsGetRequest
object.
Fields:
name: Required. Name of the recommendation.
"""
name = _messages.StringField(1, required=True)
class RecommenderProjectsLocationsRecommendersRecommendationsListRequest(_messages.Message):
r"""A RecommenderProjectsLocationsRecommendersRecommendationsListRequest
object.
Fields:
filter: Filter expression to restrict the recommendations returned.
      Supported filter fields: state_info.state, e.g.
      `state_info.state:"DISMISSED" or state_info.state:"FAILED"`.
pageSize: Optional. The maximum number of results to return from this
request. Non-positive values are ignored. If not specified, the server
will determine the number of results to return.
pageToken: Optional. If present, retrieves the next batch of results from
the preceding call to this method. `page_token` must be the value of
`next_page_token` from the previous response. The values of other method
parameters must be identical to those in the previous call.
parent: Required. The container resource on which to execute the request.
Acceptable formats: 1. "projects/[PROJECT_NUMBER]/locations/[LOCATION]/
recommenders/[RECOMMENDER_ID]", LOCATION here refers to GCP Locations:
https://cloud.google.com/about/locations/
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
parent = _messages.StringField(4, required=True)
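# Illustrative sketch only (an assumption, not generated code): one way to
# populate the list request above, following the `parent` format documented in
# its docstring. The recommender ID used here is just an example value.
def _example_build_list_request(project_number):
  """Build a list request for ACTIVE recommendations of a single recommender."""
  parent = ('projects/%s/locations/global/recommenders/'
            'google.compute.instance.MachineTypeRecommender') % project_number
  return RecommenderProjectsLocationsRecommendersRecommendationsListRequest(
      parent=parent, filter='state_info.state:"ACTIVE"', pageSize=50)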
class RecommenderProjectsLocationsRecommendersRecommendationsMarkClaimedRequest(_messages.Message):
r"""A
RecommenderProjectsLocationsRecommendersRecommendationsMarkClaimedRequest
object.
Fields:
googleCloudRecommenderV1MarkRecommendationClaimedRequest: A
GoogleCloudRecommenderV1MarkRecommendationClaimedRequest resource to be
passed as the request body.
name: Required. Name of the recommendation.
"""
googleCloudRecommenderV1MarkRecommendationClaimedRequest = _messages.MessageField('GoogleCloudRecommenderV1MarkRecommendationClaimedRequest', 1)
name = _messages.StringField(2, required=True)
class RecommenderProjectsLocationsRecommendersRecommendationsMarkFailedRequest(_messages.Message):
r"""A
RecommenderProjectsLocationsRecommendersRecommendationsMarkFailedRequest
object.
Fields:
googleCloudRecommenderV1MarkRecommendationFailedRequest: A
GoogleCloudRecommenderV1MarkRecommendationFailedRequest resource to be
passed as the request body.
name: Required. Name of the recommendation.
"""
googleCloudRecommenderV1MarkRecommendationFailedRequest = _messages.MessageField('GoogleCloudRecommenderV1MarkRecommendationFailedRequest', 1)
name = _messages.StringField(2, required=True)
class RecommenderProjectsLocationsRecommendersRecommendationsMarkSucceededRequest(_messages.Message):
r"""A
RecommenderProjectsLocationsRecommendersRecommendationsMarkSucceededRequest
object.
Fields:
googleCloudRecommenderV1MarkRecommendationSucceededRequest: A
GoogleCloudRecommenderV1MarkRecommendationSucceededRequest resource to
be passed as the request body.
name: Required. Name of the recommendation.
"""
googleCloudRecommenderV1MarkRecommendationSucceededRequest = _messages.MessageField('GoogleCloudRecommenderV1MarkRecommendationSucceededRequest', 1)
name = _messages.StringField(2, required=True)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"[email protected]"
] | |
21a55c48c32b7f9d97cc1dbe4eb947d069f083cd | e11dff811ca981f428644fd70d10a7369c671bcb | /src/tools/ecos/cvxpy/doc/source/conf.py | 8aff00ceb390bff1a69901c078f9f08166fdb1a6 | [
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] | permissive | riadnassiffe/Simulator | 3c4a036b5635534929fdb04b0e9c96d64c0da71f | 7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b | refs/heads/master | 2021-06-20T09:31:36.033427 | 2021-04-17T00:03:17 | 2021-04-17T00:03:17 | 16,033,879 | 0 | 0 | MIT | 2021-03-22T23:20:34 | 2014-01-18T20:58:10 | Jupyter Notebook | UTF-8 | Python | false | false | 8,937 | py | # -*- coding: utf-8 -*-
#
# CVXPY documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 27 20:47:07 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# To import CVXPY:
sys.path.insert(0, os.path.abspath('../..'))
# To import sphinx extensions we've put in the repository:
sys.path.insert(0, os.path.abspath('../sphinxext'))
__version__ = "0.2.17"
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.mathbase',
'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'numpydoc']
# To suppress autodoc/numpydoc warning.
# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CVXPY'
copyright = u'2014, Steven Diamond, Eric Chu, Stephen Boyd'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
table_styling_embed_css = False
html_theme_path = [alabaster.get_path(), "../themes"]
extensions += ['alabaster']
html_theme = 'cvxpy_alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'cvxgrp',
'github_repo': 'cvxpy',
'github_banner': True,
'travis_button': True,
'analytics_id': 'UA-50248335-1',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cvxpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cvxpy.tex', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cvxpy', u'CVXPY Documentation',
[u'Steven Diamond, Eric Chu, Stephen Boyd'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cvxpy', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'CVXPY', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"[email protected]"
] | |
e99a04d4cf4a320eea6c46709c2ddc8dda6c5981 | b648a0ff402d23a6432643879b0b81ebe0bc9685 | /scripts/json-equals.py | 7704bba587a813f68c8ef03a1c5c9db9af33cc46 | [
"Apache-2.0"
] | permissive | jviotti/binary-json-size-benchmark | 4712faca2724d47d23efef241983ce875dc71cee | 165b577884ef366348bf48042fddf54aacfe647a | refs/heads/main | 2023-04-18T01:40:26.141995 | 2022-12-19T13:25:35 | 2022-12-19T13:25:35 | 337,583,132 | 21 | 1 | Apache-2.0 | 2022-12-17T21:53:56 | 2021-02-10T01:18:05 | C++ | UTF-8 | Python | false | false | 323 | py | import sys
import json
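# Usage (inferred from the argument handling below):
#   python json-equals.py <first.json> <second.json>
# Exits with status 0 if both files parse to equal JSON values, 1 if they differ.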
with open(sys.argv[1], mode='r') as json_data:
data1 = json.loads(json_data.read())
with open(sys.argv[2], mode='r') as json_data:
data2 = json.loads(json_data.read())
if data1 == data2:
print("Files are equal!")
sys.exit(0)
else:
print("Files are NOT equal!")
sys.exit(1)
| [
"[email protected]"
] | |
8018f217c6e18cdb2bb6f2517df37cde252a8c36 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/programming/libs/exiv2/actions.py | 368c4462143cec10373c6bedb7c05cc40a320ad6 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("README","doc/ChangeLog")
pisitools.dohtml("doc/html/*")
| [
"[email protected]"
] | |
3d41370064cc5c6bec3ad723efc74e67f721926e | 80301f1cffc5afce13256e2ecab6323c5df00194 | /cn.fc/py/R3401.蔡斯.py | 9fa95698861d8f5919977d10088b392520536ce9 | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 37,356 | py | from ED6ScenarioHelper import *
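# Decompiled ED6 scenario script for map R3401 (the Zeiss area). main() below
# registers the map header, the string table, NPC/monster declarations, event
# and actor triggers, and the event/dialogue functions; the trailing
# SaveToFile()/Try(main) calls rebuild the scenario data file.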
def main():
    # Zeiss
CreateScenaFile(
FileName = 'R3401 ._SN',
MapName = 'Zeiss',
Location = 'R3401.x',
MapIndex = 1,
MapDefaultBGM = "ed60030",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'魔兽', # 9
'魔兽', # 10
'魔兽', # 11
'魔兽', # 12
'艾尔·雷登关所方向', # 13
'蔡斯方向', # 14
' ', # 15
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT09/CH10750 ._CH', # 00
'ED6_DT07/CH00160 ._CH', # 01
'ED6_DT07/CH00162 ._CH', # 02
'ED6_DT07/CH00100 ._CH', # 03
'ED6_DT07/CH00101 ._CH', # 04
'ED6_DT07/CH00110 ._CH', # 05
'ED6_DT07/CH00111 ._CH', # 06
'ED6_DT07/CH00102 ._CH', # 07
'ED6_DT07/CH00161 ._CH', # 08
'ED6_DT09/CH10130 ._CH', # 09
'ED6_DT09/CH10131 ._CH', # 0A
'ED6_DT09/CH10750 ._CH', # 0B
'ED6_DT09/CH10751 ._CH', # 0C
'ED6_DT09/CH10760 ._CH', # 0D
'ED6_DT09/CH10761 ._CH', # 0E
'ED6_DT09/CH10770 ._CH', # 0F
'ED6_DT09/CH10771 ._CH', # 10
)
AddCharChipPat(
'ED6_DT09/CH10750P._CP', # 00
'ED6_DT07/CH00160P._CP', # 01
'ED6_DT07/CH00162P._CP', # 02
'ED6_DT07/CH00100P._CP', # 03
'ED6_DT07/CH00101P._CP', # 04
'ED6_DT07/CH00110P._CP', # 05
'ED6_DT07/CH00111P._CP', # 06
'ED6_DT07/CH00102P._CP', # 07
'ED6_DT07/CH00161P._CP', # 08
'ED6_DT09/CH10130P._CP', # 09
'ED6_DT09/CH10131P._CP', # 0A
'ED6_DT09/CH10750P._CP', # 0B
'ED6_DT09/CH10751P._CP', # 0C
'ED6_DT09/CH10760P._CP', # 0D
'ED6_DT09/CH10761P._CP', # 0E
'ED6_DT09/CH10770P._CP', # 0F
'ED6_DT09/CH10771P._CP', # 10
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 169300,
Z = 0,
Y = -27030,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 330710,
Z = 0,
Y = -37560,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclMonster(
X = 257600,
Z = 70,
Y = -24310,
Unknown_0C = 180,
Unknown_0E = 15,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1D3,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 286240,
Z = 20,
Y = -35830,
Unknown_0C = 180,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1D1,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclEvent(
X = 222300,
Y = -1000,
Z = -28000,
Range = 217700,
Unknown_10 = 0x7D0,
Unknown_14 = 0xFFFF6CBC,
Unknown_18 = 0x0,
Unknown_1C = 4,
)
DeclActor(
TriggerX = 199000,
TriggerZ = 500,
TriggerY = -22200,
TriggerRange = 800,
ActorX = 199000,
ActorZ = 1500,
ActorY = -22200,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 3,
Unknown_22 = 0,
)
DeclActor(
TriggerX = 285640,
TriggerZ = 0,
TriggerY = -26290,
TriggerRange = 1000,
ActorX = 285640,
ActorZ = 1000,
ActorY = -26290,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 5,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_2B2", # 00, 0
"Function_1_2B3", # 01, 1
"Function_2_324", # 02, 2
"Function_3_4AC", # 03, 3
"Function_4_637", # 04, 4
"Function_5_1E52", # 05, 5
)
def Function_0_2B2(): pass
label("Function_0_2B2")
Return()
# Function_0_2B2 end
def Function_1_2B3(): pass
label("Function_1_2B3")
OP_16(0x2, 0xFA0, 0x1F018, 0xFFFD9AB8, 0x30038)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 6)), scpexpr(EXPR_END)), "loc_2DA")
OP_71(0x0, 0x4)
OP_72(0x1, 0x4)
OP_64(0x0, 0x1)
label("loc_2DA")
LoadEffect(0x0, "map\\\\mp027_00.eff")
PlayEffect(0x0, 0x0, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1300, 1300, 1300, 0xFF, 0, 0, 0, 0)
Return()
# Function_1_2B3 end
def Function_2_324(): pass
label("Function_2_324")
OP_51(0xFE, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
RunExpression(0x0, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_354")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_496")
label("loc_354")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_36D")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_496")
label("loc_36D")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_386")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_496")
label("loc_386")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_39F")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_496")
label("loc_39F")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3B8")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_496")
label("loc_3B8")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3D1")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_496")
label("loc_3D1")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3EA")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_496")
label("loc_3EA")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_403")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_496")
label("loc_403")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_41C")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_496")
label("loc_41C")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_435")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_496")
label("loc_435")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_44E")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_496")
label("loc_44E")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_467")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_496")
label("loc_467")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_480")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_496")
label("loc_480")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_496")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_496")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_4AB")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_496")
label("loc_4AB")
Return()
# Function_2_324 end
def Function_3_4AC(): pass
label("Function_3_4AC")
EventBegin(0x0)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_5C9")
OP_A2(0x504)
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4FE")
ChrTalk( #0
0x101,
(
"#004F咦……\x01",
"这个照明灯,是不是有点怪呢?\x02",
)
)
CloseMessageWindow()
Jump("loc_534")
label("loc_4FE")
ChrTalk( #1
0x101,
(
"#004F咦……\x01",
"那个照明灯,是不是有点怪呢?\x02",
)
)
CloseMessageWindow()
label("loc_534")
ChrTalk( #2
0x102,
(
"#012F确实是。\x01",
"应该是有点故障了。\x02\x03",
"导力器的导力\x01",
"是可以自动积蓄的,\x01",
"所以,我想应该不用担心……\x02",
)
)
CloseMessageWindow()
Jump("loc_634")
label("loc_5C9")
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_600")
ChrTalk( #3
0x101,
"#000F照明灯好像有点怪怪的。\x02",
)
CloseMessageWindow()
Jump("loc_634")
label("loc_600")
ChrTalk( #4
0x102,
(
"#015F照明灯有点闪烁。\x01",
"看来有点故障了。\x02",
)
)
CloseMessageWindow()
label("loc_634")
EventEnd(0x1)
Return()
# Function_3_4AC end
def Function_4_637(): pass
label("Function_4_637")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_1E51")
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
OP_A2(0x506)
EventBegin(0x0)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
ClearChrFlags(0xB, 0x80)
SetChrPos(0x8, 197700, 0, -23200, 45)
SetChrPos(0x9, 199000, 0, -24200, 0)
SetChrPos(0xA, 200900, 0, -24200, 315)
SetChrPos(0xB, 200600, 0, -23100, 315)
SetChrFlags(0x8, 0x40)
SetChrFlags(0x9, 0x40)
SetChrFlags(0xA, 0x40)
SetChrFlags(0xB, 0x40)
NpcTalk( #5
0x8,
"女孩子的声音",
"啊——!\x02",
)
CloseMessageWindow()
OP_20(0x5DC)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x102, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
Fade(1000)
OP_6C(45000, 0)
OP_6D(200700, 2000, -24400, 0)
OP_31(0x6, 0x0, 0x12)
OP_B5(0x6, 0x0)
OP_B5(0x6, 0x1)
OP_B5(0x6, 0x5)
OP_B5(0x6, 0x4)
OP_41(0x6, 0xB5)
OP_41(0x6, 0xF4)
OP_41(0x6, 0x112)
OP_41(0x6, 0x2C9, 0x0)
OP_41(0x6, 0x271, 0x1)
OP_41(0x6, 0x262, 0x5)
OP_41(0x6, 0x26B, 0x4)
OP_35(0x6, 0xD2)
OP_36(0x6, 0x104)
AddParty(0x6, 0xFF)
SetChrPos(0x107, 204300, 0, -26400, 270)
OP_0D()
OP_21()
OP_1D(0x56)
SetChrFlags(0x101, 0x1000)
SetChrFlags(0x102, 0x1000)
Sleep(500)
OP_62(0x107, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
NpcTalk( #6
0x107,
"小女孩",
(
"#065F#2P已、已经聚集了\x01",
"这么多只魔兽啊~……\x02\x03",
"这样下去会坏掉的……\x02\x03",
"既、既然这样的话……\x02",
)
)
CloseMessageWindow()
def lambda_81F():
OP_6B(2600, 2500)
ExitThread()
QueueWorkItem(0x107, 1, lambda_81F)
Sleep(1000)
OP_22(0xD8, 0x0, 0x64)
SetChrChipByIndex(0x107, 2)
OP_51(0x107, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(1000)
Sleep(500)
TurnDirection(0x107, 0xA, 0)
NpcTalk( #7
0x107,
"小女孩",
"#062F#2P方向OK,仰角20度……\x02",
)
CloseMessageWindow()
Sleep(400)
NpcTalk( #8
0x107,
"小女孩",
(
"#062F导力填充率30%……\x02\x03",
"#068F……发射!!\x02",
)
)
CloseMessageWindow()
LoadEffect(0x2, "map\\\\mp019_00.eff")
def lambda_901():
OP_94(0x1, 0xFE, 0xB4, 0x1F4, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x107, 1, lambda_901)
SetChrChipByIndex(0x107, 2)
SetChrPos(0xE, 196500, 1500, -22500, 0)
OP_22(0x1FA, 0x0, 0x64)
PlayEffect(0x2, 0xFF, 0x107, 250, 1000, 250, 0, 0, 0, 1000, 1000, 1000, 0xE, 0, 0, 0, 0)
OP_99(0x107, 0x0, 0x3, 0x7D0)
OP_99(0x107, 0x3, 0x7, 0x7D0)
def lambda_979():
OP_94(0x1, 0xFE, 0x78, 0x384, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_979)
def lambda_98F():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_98F)
def lambda_9A5():
OP_94(0x1, 0xFE, 0xE6, 0x2BC, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_9A5)
def lambda_9BB():
OP_94(0x1, 0xFE, 0x5A, 0x1F4, 0xFA0, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_9BB)
Sleep(1000)
WaitChrThread(0x8, 0x1)
def lambda_9DB():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_9DB)
WaitChrThread(0x9, 0x1)
def lambda_9EE():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_9EE)
WaitChrThread(0xA, 0x1)
def lambda_A01():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_A01)
WaitChrThread(0xB, 0x1)
def lambda_A14():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0xB, 1, lambda_A14)
OP_8C(0x107, 270, 0)
Sleep(400)
NpcTalk( #9
0x107,
"小女孩",
(
"#062F#2P再、再靠近的话,\x01",
"这次真的会打中你们哦!\x02\x03",
"真、真的哦,我是认真的!\x02",
)
)
CloseMessageWindow()
OP_62(0xA, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(300)
OP_62(0x9, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(100)
OP_62(0xB, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(100)
OP_62(0x8, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(200)
Sleep(1000)
def lambda_AE6():
OP_6D(201700, 2000, -25100, 2500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_AE6)
SetChrFlags(0x8, 0x20)
SetChrFlags(0x9, 0x20)
SetChrFlags(0xA, 0x20)
SetChrFlags(0xB, 0x20)
def lambda_B12():
OP_94(0x0, 0xFE, 0x0, 0x12C, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_B12)
OP_63(0xA)
Sleep(300)
def lambda_B30():
OP_94(0x0, 0xFE, 0x0, 0x258, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_B30)
OP_63(0xB)
def lambda_B49():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_B49)
OP_63(0x9)
Sleep(600)
def lambda_B67():
OP_94(0x0, 0xFE, 0x0, 0x320, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_B67)
OP_63(0x8)
SetChrChipByIndex(0x107, 1)
OP_62(0x107, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1700)
NpcTalk( #10
0x107,
"小女孩",
(
"#065F#2P啊……\x01",
"起、起到反效果了……\x02",
)
)
CloseMessageWindow()
SetChrPos(0x101, 210200, 0, -30000, 0)
SetChrPos(0x102, 209330, 0, -30000, 0)
SetChrFlags(0x102, 0x4)
def lambda_C0F():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_C0F)
Sleep(150)
def lambda_C2A():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_C2A)
def lambda_C40():
OP_94(0x0, 0xFE, 0x0, 0x1F4, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_C40)
Sleep(300)
def lambda_C5B():
OP_94(0x0, 0xFE, 0x0, 0x258, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_C5B)
Sleep(400)
NpcTalk( #11
0x107,
"小女孩",
"#069F#2P呀……!\x02",
)
OP_9E(0x107, 0x14, 0x0, 0x190, 0xFA0)
CloseMessageWindow()
def lambda_CA6():
OP_94(0x0, 0xA, 0x0, 0x7D0, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_CA6)
SetChrFlags(0x101, 0x1000)
SetChrFlags(0x102, 0x1000)
SetChrChipByIndex(0x101, 4)
SetChrChipByIndex(0x102, 6)
def lambda_CD0():
OP_6B(3160, 1500)
ExitThread()
QueueWorkItem(0x101, 0, lambda_CD0)
def lambda_CE0():
OP_6D(203200, 0, -24900, 1500)
ExitThread()
QueueWorkItem(0x101, 2, lambda_CE0)
def lambda_CF8():
OP_6C(78000, 1200)
ExitThread()
QueueWorkItem(0x102, 2, lambda_CF8)
ChrTalk( #12 op#A op#5
0x101,
"#10A#1P喔喔喔喔喔!\x05\x02",
)
OP_8E(0x101, 0x326F4, 0x0, 0xFFFF9886, 0x2710, 0x0)
def lambda_D32():
OP_8E(0xFE, 0x317B8, 0x0, 0xFFFF952A, 0x2328, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_D32)
def lambda_D4D():
OP_8C(0xFE, 135, 400)
ExitThread()
QueueWorkItem(0x107, 2, lambda_D4D)
SetChrFlags(0x107, 0x1000)
SetChrChipByIndex(0x107, 8)
def lambda_D65():
OP_8F(0xFE, 0x3214A, 0x0, 0xFFFF9566, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x107, 1, lambda_D65)
OP_51(0x101, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x101, 7)
def lambda_D90():
OP_99(0xFE, 0x0, 0xC, 0x9C4)
ExitThread()
QueueWorkItem(0x101, 3, lambda_D90)
OP_22(0xA4, 0x0, 0x64)
OP_22(0x1F4, 0x0, 0x64)
OP_96(0x101, 0x31830, 0x0, 0xFFFF9C00, 0x5DC, 0x1770)
OP_7C(0x0, 0x64, 0xBB8, 0x64)
PlayEffect(0x8, 0xFF, 0xFF, 202800, 0, -25600, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
def lambda_E07():
OP_94(0x1, 0xA, 0xB4, 0x7D0, 0x3A98, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_E07)
OP_96(0x101, 0x31A92, 0x0, 0xFFFF9A52, 0x1F4, 0x1388)
def lambda_E34():
OP_94(0x1, 0xFE, 0xB4, 0x384, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_E34)
def lambda_E4A():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_E4A)
def lambda_E60():
OP_94(0x1, 0xFE, 0xB4, 0x1F4, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_E60)
WaitChrThread(0x102, 0x1)
SetChrChipByIndex(0x102, 5)
ClearChrFlags(0x102, 0x4)
Sleep(1000)
NpcTalk( #13
0x107,
"小女孩",
"#065F咦……\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x107, 1)
ClearChrFlags(0x107, 0x1000)
TurnDirection(0x107, 0x101, 400)
NpcTalk( #14
0x107,
"小女孩",
"#560F啊,刚才的……!\x02",
)
CloseMessageWindow()
ChrTalk( #15
0x101,
(
"#006F待会再慢慢聊吧!\x01",
"你先退到我们后面去!\x02",
)
)
CloseMessageWindow()
ChrTalk( #16
0x102,
(
"#012F总之\x01",
"先把这些家伙赶走吧!\x02",
)
)
CloseMessageWindow()
Battle(0x3A7, 0x0, 0x0, 0x0, 0xFF)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(1, "loc_F49"),
(SWITCH_DEFAULT, "loc_F4C"),
)
label("loc_F49")
OP_B4(0x0)
Return()
label("loc_F4C")
EventBegin(0x0)
OP_4F(0x23, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrFlags(0x8, 0x80)
SetChrFlags(0x9, 0x80)
SetChrFlags(0xA, 0x80)
SetChrFlags(0xB, 0x80)
SetChrPos(0x101, 202800, 0, -25600, 315)
SetChrPos(0x102, 202500, 0, -27300, 315)
SetChrPos(0x107, 204200, 0, -26900, 315)
OP_6D(203400, 0, -26050, 0)
SetChrChipByIndex(0x107, 65535)
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
FadeToBright(1000, 0)
OP_0D()
NpcTalk( #17
0x107,
"小女孩",
(
"#065F真、真是吓死人了~……\x02\x03",
"#067F那个那个……\x01",
"真是非常感谢呢。\x02\x03",
"救了我一命呢。\x02",
)
)
CloseMessageWindow()
OP_44(0x102, 0xFF)
OP_44(0x101, 0xFF)
def lambda_1040():
OP_6B(2790, 2000)
ExitThread()
QueueWorkItem(0x101, 0, lambda_1040)
SetChrChipByIndex(0x102, 65535)
TurnDirection(0x102, 0x107, 400)
SetChrChipByIndex(0x101, 65535)
TurnDirection(0x101, 0x107, 400)
WaitChrThread(0x101, 0x0)
ChrTalk( #18
0x101,
(
"#001F啊哈哈。\x01",
"你没事就好了。\x02\x03",
"#006F不过……\x01",
"以后可要吸取教训哦。\x02\x03",
"一个人和魔兽战斗\x01",
"这种危险的事可不能做哦。\x02",
)
)
CloseMessageWindow()
NpcTalk( #19
0x107,
"小女孩",
(
"#065F啊,但是但是……\x02\x03",
"如果放着不管的话,\x01",
"隧道的照明灯会坏掉呢……\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0x101,
(
"#505F这么说来……\x02\x03",
"为什么魔兽会聚集在\x01",
"熄灭了的照明灯周围呢?\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x6, 0x1, 0x8)"), scpexpr(EXPR_END)), "loc_12D6")
ChrTalk( #21
0x102,
(
"#010F以前在更换路灯的时候\x01",
"不是也发生过同样的事吗?\x02\x03",
"因为导力器里的七耀石\x01",
"是魔兽喜欢的东西。\x02\x03",
"因此路灯里\x01",
"都带有驱赶魔兽的机能……\x02\x03",
"如果这种机能坏了的话,\x01",
"自然就会容易吸引魔兽过来。\x02",
)
)
CloseMessageWindow()
Jump("loc_1392")
label("loc_12D6")
ChrTalk( #22
0x102,
(
"#010F因为导力器里的七耀石\x01",
"是魔兽喜欢的东西。\x02\x03",
"因此路灯里\x01",
"都带有驱赶魔兽的机能……\x02\x03",
"如果这种机能坏了的话,\x01",
"自然就会容易吸引魔兽过来。\x02",
)
)
CloseMessageWindow()
label("loc_1392")
ChrTalk( #23
0x101,
(
"#501F啊,原来是这样啊。\x02\x03",
"#007F不过就算这样,\x01",
"也不能这么胡来啊。\x02\x03",
"万一受伤的话可就不好了。\x02",
)
)
CloseMessageWindow()
NpcTalk( #24
0x107,
"小女孩",
(
"#063F啊……\x01",
"对、对不起……\x02",
)
)
CloseMessageWindow()
ChrTalk( #25
0x102,
(
"#019F好了好了,到此为止吧。\x02\x03",
"更何况,『不能胡来』从你嘴里说出来,\x01",
"可是完全没有说服力啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #26
0x101,
(
"#509F讨厌,少泼冷水啦。\x02\x03",
"#006F算了……\x01",
"我叫艾丝蒂尔。\x02",
)
)
CloseMessageWindow()
ChrTalk( #27
0x102,
(
"#010F我是约修亚。\x02\x03",
"我们俩都是\x01",
"游击士协会的见习游击士。\x02",
)
)
CloseMessageWindow()
NpcTalk( #28
0x107,
"小女孩",
(
"#061F哇~~\x01",
"难怪那么厉害呢……\x02\x03",
"#060F我叫提妲。\x02\x03",
"现在正在\x01",
"蔡斯的中央工房实习。\x02",
)
)
CloseMessageWindow()
ChrTalk( #29
0x101,
(
"#501F嘿嘿~\x01",
"所以才会打扮成这样吧。\x02\x03",
"那么,提妲。\x02\x03",
"你要回蔡斯的话,\x01",
"就和我们一起走吧?\x02",
)
)
CloseMessageWindow()
ChrTalk( #30
0x102,
(
"#010F是啊。\x01",
"如果再遇到魔兽就糟糕了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #31
0x107,
(
"#061F真、真的吗?\x01",
"真是非常感谢呢。\x02\x03",
"#560F啊,不过请稍等一下。\x01",
" \x02\x03",
"我得先修理好那个照明灯。\x02",
)
)
CloseMessageWindow()
ChrTalk( #32
0x101,
(
"#004F啊,那也是。\x01",
"这样放着不管的确非常危险。\x02\x03",
"不过……\x01",
"你是怎么知道这里的照明灯坏了呢?\x02",
)
)
CloseMessageWindow()
ChrTalk( #33
0x107,
(
"#060F啊,我在调查电脑的\x01",
"数据库的时候偶然发现的。\x02\x03",
"好像当初安装时候用的是次品,\x01",
"而且设置元件也不齐全。\x02",
)
)
CloseMessageWindow()
ChrTalk( #34
0x102,
(
"#010F原来如此,\x01",
"那你还是快看看吧。\x02",
)
)
CloseMessageWindow()
ChrTalk( #35
0x101,
"#505F(电脑?数据库?)\x02",
)
CloseMessageWindow()
FadeToDark(1000, 0, -1)
OP_0D()
OP_6D(198940, 30, -23590, 0)
OP_6B(2800, 0)
OP_6C(45000, 0)
SetChrPos(0x101, 199360, 10, -24480, 0)
SetChrPos(0x102, 198190, 20, -24530, 0)
SetChrPos(0x107, 199160, 20, -22710, 0)
SetChrFlags(0x107, 0x4)
Sleep(500)
FadeToBright(1000, 0)
OP_0D()
ChrTalk( #36
0x107,
"#062F#4P……嘿咻。\x02",
)
CloseMessageWindow()
OP_72(0x1, 0x4)
Sleep(100)
OP_71(0x1, 0x4)
Sleep(100)
OP_72(0x1, 0x4)
Sleep(100)
OP_71(0x1, 0x4)
Sleep(90)
OP_72(0x1, 0x4)
Sleep(80)
OP_71(0x1, 0x4)
Sleep(70)
OP_72(0x1, 0x4)
Sleep(60)
OP_71(0x1, 0x4)
Sleep(50)
OP_72(0x1, 0x4)
Sleep(1000)
ChrTalk( #37
0x107,
"#560F#4P好~这样就可以了。\x02",
)
CloseMessageWindow()
OP_8F(0x107, 0x309BC, 0x1E, 0xFFFFA4DE, 0x7D0, 0x0)
OP_8C(0x107, 180, 400)
ClearChrFlags(0x107, 0x4)
ChrTalk( #38
0x107,
"#061F#1P让你们久等了。\x02",
)
CloseMessageWindow()
ChrTalk( #39
0x101,
(
"#501F哎~好厉害。\x01",
"原来你这么熟练的啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #40
0x102,
(
"#019F真不愧是\x01",
"在中央工房的见习生啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #41
0x107,
(
"#067F#1P嘿嘿……\x01",
"这不算什么啦。\x02\x03",
"只不过是修正接触不良的结晶回路\x01",
"和调整错乱的导力压而已。\x02",
)
)
CloseMessageWindow()
ChrTalk( #42
0x101,
(
"#505F???\x02\x03",
"唔……\x01",
"听起来好像相当复杂的样子呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #43
0x107,
(
"#560F其实一点也不复杂。\x02\x03",
"这个呢,\x01",
"简单解释起来的话……\x02",
)
)
CloseMessageWindow()
ChrTalk( #44
0x107,
(
"#1K#1P在导力器的内部镶嵌着\x01",
"可以发挥各种功能的结晶回路。\x01",
"结晶回路与元件必须准确地\x01",
"进行连接才能使导力器正常运作,\x01",
"而当两者出现连接错误时,\x01",
"导力器生成的导力就会无处可去,\x01",
"其结果自然就导致\x01",
"设计时预想的机能无法正常发挥。\x01",
"以照明灯的情况来说就是发光和驱除魔兽的……\x02",
)
)
Sleep(2000)
OP_62(0x101, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
ChrTalk( #45
0x101,
"#1K#004F停、停一下!\x02",
)
OP_56(0x1)
OP_59()
ChrTalk( #46
0x101,
(
"#506F还、还是以后再慢慢解释吧。\x01",
"我们差不多该出发了呢~\x02\x03",
"嗯嗯~\x01",
"站在这里说话也不方便嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk( #47
0x107,
(
"#067F#1P啊,说得也是。\x01",
"虽然没解释完有点可惜……\x02",
)
)
CloseMessageWindow()
ChrTalk( #48
0x101,
"#007F(呼……)\x02",
)
CloseMessageWindow()
ChrTalk( #49
0x102,
(
"#019F哈哈,\x01",
"那我们继续前往蔡斯吧。\x02",
)
)
CloseMessageWindow()
ChrTalk( #50
0x101,
"#006FOK!\x02",
)
CloseMessageWindow()
ChrTalk( #51
0x107,
"#061F#1P好的。\x02",
)
CloseMessageWindow()
ClearChrFlags(0x101, 0x1000)
ClearChrFlags(0x102, 0x1000)
OP_64(0x0, 0x1)
EventEnd(0x0)
label("loc_1E51")
Return()
# Function_4_637 end
def Function_5_1E52(): pass
label("Function_5_1E52")
FadeToDark(300, 0, 100)
AnonymousTalk( #52
"\x07\x05这是一台可供旅行者回复体力的导力器装置。\x07\x00\x02",
)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
32,
1,
(
"在此休息\x01", # 0
"离开\x01", # 1
)
)
MenuEnd(0x1)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2071")
FadeToBright(100, 0)
Sleep(500)
SoundLoad(13)
OP_82(0x0, 0x2)
PlayEffect(0x0, 0x2, 0xFF, 285640, 1000, -26290, 0, 0, 0, 700, 700, 700, 0xFF, 0, 0, 0, 0)
OP_6F(0x11, 0)
OP_70(0x11, 0x32)
OP_73(0x11)
OP_20(0xBB8)
OP_22(0xC, 0x0, 0x64)
OP_82(0x2, 0x2)
LoadEffect(0x1, "map\\\\mp027_01.eff")
PlayEffect(0x1, 0x1, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1500, 1500, 1500, 0xFF, 0, 0, 0, 0)
FadeToDark(1000, 0, -1)
Sleep(700)
OP_22(0xD, 0x0, 0x64)
OP_0D()
OP_31(0x0, 0xFE, 0x0)
OP_31(0x1, 0xFE, 0x0)
OP_31(0x2, 0xFE, 0x0)
OP_31(0x3, 0xFE, 0x0)
OP_31(0x4, 0xFE, 0x0)
OP_31(0x5, 0xFE, 0x0)
OP_31(0x6, 0xFE, 0x0)
OP_31(0x7, 0xFE, 0x0)
SetChrPos(0x0, 285600, 30, -28390, 13)
SetChrPos(0x1, 285600, 30, -28390, 13)
SetChrPos(0x2, 285600, 30, -28390, 13)
SetChrPos(0x3, 285600, 30, -28390, 13)
OP_69(0x0, 0x0)
OP_30(0x0)
Sleep(3500)
OP_82(0x1, 0x2)
LoadEffect(0x0, "map\\\\mp027_00.eff")
PlayEffect(0x0, 0x0, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1300, 1300, 1300, 0xFF, 0, 0, 0, 0)
OP_6F(0x11, 0)
OP_1E()
FadeToBright(1000, 0)
OP_56(0x0)
TalkEnd(0xFF)
Return()
label("loc_2071")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_208B")
FadeToBright(300, 0)
TalkEnd(0xFF)
Return()
label("loc_208B")
Return()
# Function_5_1E52 end
SaveToFile()
Try(main)
| [
"[email protected]"
] | |
b54da796c8f41bc31e9c535663ef12d38bbd0a51 | 6a0157b40e54d9c6fcca1f96c4dd0ebd8101813b | /motsfinder/axisym/curve/expcalc.py | 5e0ca4e59c313db74164088098d022811d80c854 | [
"MIT"
] | permissive | cevans216/distorted-motsfinder-public | 15ba97d632928e36c6a8ac159a6a2c79b7ae6156 | a7f28b13f3b80b5980ef9eccf705146b77691498 | refs/heads/master | 2020-07-28T05:38:16.662849 | 2019-07-26T17:44:55 | 2019-07-26T17:44:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,938 | py | r"""@package motsfinder.axisym.curve.expcalc
Computation class storing interim results of expansion calculations.
The implementation here uses the formulas derived in
\ref thornburg2003_1 "[1]". Specifically, we make heavy use of the quantities
`A, B, C, D` defined in \ref thornburg2003_1 "[1]" in equation (12) to compute
the expansion \f$ \Theta \f$ using equation (11). See also
\ref pookkolb2018_1 "[2]" and the docstrings of the individual procedures.
In the base class ExpansionCalc defined in this module, we do not consider how
the used quantities \f$ s_i \f$ and \f$ \partial_i s_j \f$ are obtained. This
depends on how the surfaces are represented and hence is the responsibility of
subclasses to implement. Additionally, subclasses also need to supply surface
parameter derivatives defined in \ref thornburg2003_1 "[1]" as
\f$ X^u_i = \partial_i y^u \f$ and
\f$ X^u_{ij} = \partial_i\partial_j y^u \f$.
In the axisymmetric case considered here, we have only one parameter,
\f$ y^u = \lambda \f$ along the curve, and hence drop the `u` superscript.
Note that in this code, we call the covector field \f$ X_i \f$ simply `X` and
the 2nd rank tensor field \f$ X_{ij} \f$ simply `Y` (Python cannot
differentiate between objects based on how many indices you use).
@b Examples
See implementations starshapedcurve._StarShapedExpansionCalc and
refparamcurve._RefParamExpansionCalc.
@b References
\anchor thornburg2003_1 [1] Thornburg, Jonathan. "A fast apparent horizon finder
for three-dimensional Cartesian grids in numerical relativity." Classical
and quantum gravity 21.2 (2003): 743.
\anchor pookkolb2018_1 [2] D. Pook-Kolb, O. Birnholtz, B. Krishnan and E.
Schnetter, "The existence and stability of marginally trapped surfaces."
arXiv:1811.10405 [gr-qc].
"""
from abc import ABCMeta, abstractmethod
from math import fsum
from six import add_metaclass
import numpy as np
from scipy import linalg
from scipy.misc import derivative
from ...utils import cache_method_results
from ...numutils import inverse_2x2_matrix_derivative
from ...metric import christoffel_symbols, christoffel_deriv
from ...metric import riemann_components
__all__ = []
# It is customary to denote indices of tensors without spaces, e.g.:
# T_{ijk} => T[i,j,k]
# We disable the respective pylint warning for this file.
# pylint: disable=bad-whitespace
@add_metaclass(ABCMeta)
class ExpansionCalc(object):
r"""Abstract base class for computing the expansion at one point.
This class serves as coordinator for computing the expansion and
functional derivatives w.r.t. the horizon function. Sub classes need only
implement a small number of computational methods.
The purpose of having a separate class hierarchy for computing the
expansion (as opposed to doing all the computations inside the curve
classes) is to be able to store a number of interim results valid only for
the results at one point of the surface. Including these as `cache` in the
curve classes would in principle be possible. To ease management of cache
invalidation (when computing at a different point), the complete cache
should live on one object. The ExpansionCalc class and its sub classes can
be interpreted as such a cache, with added functionality to do the
necessary computations using the cached values.
"""
def __init__(self, curve, h_fun, param, metric):
r"""Create a "calc" object for certain point of a curve.
The curve represents an axisymmetric surface.
@param curve (expcurve.ExpansionCurve)
The curve representing the (trial) surface on which to compute the
expansion and other quantities.
@param h_fun (exprs.numexpr.NumericExpression)
The (1D) "horizon" function. The subclasses implementing this
ExpansionCalc class are free to interpret as they wish.
@param param (float)
The parameter value along the `curve` at which the quantities
should be computed.
@param metric
The Riemannian 3-metric defining the geometry of the surrounding
space.
"""
## Step sizes for FD numerical differentiation of the expansion
## \wrt `h`, `h'`, ``h''``, respectively.
self.dx_hdiffs = (1e-6, 1e-6, 1e-3)
## Finite difference differentiation order.
self.fd_order = 3
## The curve representing the (trial) surface.
self.curve = curve
## Horizon function (in case we need higher derivatives than ``h''``).
self.h_fun = h_fun
## Value of horizon function `h` at the given parameter.
self.h = h_fun(param)
## Value of `h'` at the given parameter.
self.dh = h_fun.diff(param, n=1)
## Value of ``h''`` at the given parameter.
self.ddh = h_fun.diff(param, n=2)
## Parameter on the curve at which to do the computations.
self.param = param
point = curve(param, xyz=True)
## 3D point in `x`,`y`,`z` coordinates.
self.point = point
## Metric (tensor field).
self.metric = metric
## Metric tensor at the point to do computations at.
self.g = metric.at(point)
if curve.extr_curvature is None:
## Extrinsic curvature at the point to do computations at.
self.K = None
else:
self.K = curve.extr_curvature(point)
# Cached metric derivatives (computed on-demand).
self._dg = None
self._dg_inv = None
self._ddg = None
self._ddg_inv = None
## Derivatives \f$ \partial_i \ln\sqrt{g} \f$
self.dlnsqrtg = np.asarray(metric.diff_lnsqrtg(point))
s, ds, X, Y = self._compute_s_ds_X_Y()
## Normal covector (not normalized).
self.s = np.asarray(s)
## Derivative matrix \f$ \partial_i s_j \f$ of normal vector.
self.ds = np.asarray(ds)
## Derivative covector \f$ X_i := \partial_i \lambda(\vec x) \f$.
self.X = np.asarray(X)
## Second derivatives \f$ Y := X_{ij} := \partial_i\partial_j\lambda\f$.
self.Y = np.asarray(Y)
## Contravariant normal vector (not normalized).
self.s_up = self.g.raise_idx(s)
## Contravariant parameter derivative \f$ X^i := g^{ij}X_j \f$.
self.X_up = self.g.raise_idx(X)
ABCD, trK = self._compute_ABCDtrK()
## A, B, C, D terms of the Thornburg expansion formula.
self.ABCD = ABCD
## Trace of the extrinsic curvature.
self.trK = trK
## Cached expansion result.
self._Th = None
@property
def dg(self):
r"""Derivative of 3-metric components \wrt x,y,z."""
if self._dg is None:
self._dg = np.asarray(self.metric.diff(self.point, diff=1))
return self._dg
@property
def dg_inv(self):
r"""Derivative of inverse 3-metric components.
This is computed using
\f$0 = \partial_i \delta^a_b = \partial_i(g^{ac}g_{cb})\f$
from which we get
\f[
\partial_i g^{-1} = -g^{-1} (\partial_i g) g^{-1}.
\f]
"""
if self._dg_inv is None:
g_inv = self.g.inv
dg = self.dg
# explanation:
# X = g_inv.dot(dg) == g^ad partial_i g_db
# Y = X.dot(g_inv) == X^a_ib g^be
# => Y has indices Y[a,i,e] == (g^-1 partial_i g g^-1)^ae
# we want "i" to be the first axis => swapaxes(0, 1)
# equivalent to: -np.einsum('ic,acd,dj', _g_inv, _dg, _g_inv)
self._dg_inv = -(
g_inv.dot(dg).dot(g_inv).swapaxes(0, 1)
)
return self._dg_inv
@property
def ddg(self):
r"""Second derivatives of 3-metric components."""
if self._ddg is None:
self._ddg = np.asarray(self.metric.diff(self.point, diff=2))
return self._ddg
@property
def ddg_inv(self):
r"""Second derivatives of inverse 3-metric components.
As for `dg_inv`, using
\f$0 = \partial_i \partial_j \delta^a_b
= \partial_i \partial_j (g^{ac}g_{cb})\f$
we get
\f[
\partial_i \partial_j g^{-1}
= -g^{-1}\big[
(\partial_i \partial_j g) g^{-1}
+ (\partial_j g) (\partial_i g^{-1})
+ (\partial_i g) (\partial_j g^{-1})
\big].
\f]
"""
if self._ddg_inv is None:
g_inv = self.g.inv
dg = self.dg
dg_inv = self.dg_inv
ddg = self.ddg
# equivalent to:
# -(
# + np.einsum('ij,abjk,kl', g_inv, ddg, g_inv)
# + np.einsum('ij,bjk,akl', g_inv, dg, dg_inv)
# + np.einsum('ij,ajk,bkl', g_inv, dg, dg_inv)
# )
tmp = g_inv.dot(dg).dot(dg_inv)
self._ddg_inv = -(
+ np.moveaxis(g_inv.dot(ddg).dot(g_inv), [1,2,0], [0,1,2])
+ np.moveaxis(tmp, [2,1,0], [0,1,2])
+ np.moveaxis(tmp, [1,2,0], [0,1,2])
)
return self._ddg_inv
def _compute_ABCDtrK(self):
r"""Compute the A, B, C, D and trace(K) terms.
The computation only uses the cached covariant normal `s` and its
derivatives `ds` (in addition to the metric and extrinsic curvature,
of course). This means that any subclass only needs to implement
computing `s` and `ds` in order to use this function.
This computes the terms as defined in equation (12) in
\ref thornburg2003_1 "[1]".
"""
s, s_up, ds = self.s, self.s_up, self.ds
g, dg_inv, dlnsqrtg = self.g, self.dg_inv, self.dlnsqrtg
A = (
- ds.dot(s_up).dot(s_up)
- 0.5 * dg_inv.dot(s).dot(s).dot(s_up)
)
B = (
dg_inv.dot(s).diagonal().sum()
+ g.inv.dot(ds).diagonal().sum()
+ dlnsqrtg.dot(s_up)
)
if self.K is None:
trK = 0.0
C = 0.0
else:
trK = g.inv.dot(self.K).diagonal().sum()
C = self.K.dot(s_up).dot(s_up)
D = s.dot(s_up)
return (A, B, C, D), trK
def expansion(self, ingoing=False):
r"""Compute the expansion at the configured point.
This implements equation (11) in \ref thornburg2003_1 "[1]".
"""
if ingoing:
A, B, C, D = self.ABCD
return -A/D**1.5 - B/D**0.5 + C/D - self.trK
if self._Th is None:
A, B, C, D = self.ABCD
self._Th = A/D**1.5 + B/D**0.5 + C/D - self.trK
return self._Th
def diff(self, hdiff=0):
r"""Compute derivative of expansion \wrt `h`, `h'`, or ``h''``.
The argument `hdiff` controls the derivative order of `h` with
respect to which to differentiate the expansion, i.e. `hdiff=0` will
compute \f$ \partial_{h}\Theta \f$, while for `hdiff=2` we
compute \f$ \partial_{h''}\Theta \f$.
Numerical FD differentiation is performed if a `NotImplementedError`
is raised in one of the subroutines.
"""
try:
return self._diff(hdiff=hdiff)
except NotImplementedError:
return self._diff_FD(hdiff=hdiff)
def _diff_FD(self, hdiff):
r"""Compute derivatives of the expansion using finite differencing.
Since the expansion depends on `h` and its derivatives only
ultra-locally, a reasonable approximation to the variational
        derivative of the expansion w.r.t. `h` can be obtained by varying `h`
        (or its derivatives) point-wise, i.e. by computing the usual partial
        derivative of the expansion w.r.t. `h`. This can be approximated using finite
difference differentiation, which is done in this function. Note that
irrespective of the accuracy of this approximation, the test whether
the expansion has the desired value (e.g. 0.0 for a MOTS) is
independent of the results computed here.
"""
h_orig = self.curve.h
Th0 = self.expansion()
param = self.param
h_plus_eps = _FuncVariation(h_orig.evaluator(), diff=hdiff)
with self.curve.override_evaluator(h_plus_eps):
def f(eps):
if eps == 0:
return Th0
h_plus_eps.eps = eps
with self.curve.suspend_calc_obj():
return self.curve.expansion(param)
dx = self.dx_hdiffs[hdiff]
return derivative(f, x0=0.0, n=1, dx=dx, order=self.fd_order)
def _diff(self, hdiff):
r"""Compute analytical functional derivatives of the expansion.
This may raise a `NotImplementedError`, indicating that FD
differentiation needs to be performed.
@param hdiff
Derivative order of `h` to differentiate the expansion by (see
below). E.g., a value of `0` will compute \f$\partial_h \Theta\f$.
@b Notes
In general, due to the ultra-local dependency of the expansion on `h`
and its first two derivatives, we can treat the variational
differentiation like a simple partial differentiation. This can also
be seen by taking the definition
\f[
(\delta\Theta)(h)\Delta
:= \frac{d}{d\varepsilon}\Big|_{\varepsilon=0}
\Theta(h+\varepsilon\Delta)
\f]
and separating the terms based on the derivative order of
\f$\Delta\f$. The result will be of the form
\f[
(\delta\Theta)(h)\Delta =
\partial_h\Theta \Delta
+ \partial_{h'}\Theta \Delta'
+ \partial_{h''}\Theta \Delta''.
\f]
These three terms are computed here using
\f[
\partial_f \Theta =
\frac{A_f}{D^{3/2}}
- \frac{3}{2} \frac{A D_f}{D^{5/2}}
+ \frac{B_f}{D^{1/2}}
- \frac{1}{2} \frac{B D_f}{D^{3/2}}
+ \frac{C_f}{D}
- \frac{C D_f}{D^2}
- \partial_f \,\mathrm{tr} K,
\f]
where `f` is one of ``h, h', h''``.
The terms `A`, `B`, `C`, and `D` are defined in [1], but here we
repeat them for convenience:
\f{eqnarray*}{
A &:=& -s^i s^j \partial_i s_j - \frac{1}{2} s^i (\partial_i g^{kl}) s_k s_l \\
B &:=& (\partial_i g^{ij}) s_j + g^{ij} \partial_i s_j + (\partial_i \ln\sqrt{g}) s^i \\
C &:=& K^{ij} s_i s_j \\
D &:=& s_i s^i.
\f}
@b References
[1] Thornburg, Jonathan. "A fast apparent horizon finder for
three-dimensional Cartesian grids in numerical relativity."
Classical and quantum gravity 21.2 (2003): 743.
"""
if hdiff == 0: # del_h H
A, B, C, D = self.ABCD
dhA, dhB, dhC, dhD, dhtrK = self.get_dh_ABCDtrK()
return (
- 3 * A * dhD / (2*D**2.5) - B * dhD / (2*D**1.5)
- C/D**2 * dhD
+ dhC / D + dhB / np.sqrt(D) + dhA / D**1.5
- dhtrK
)
if hdiff == 1: # del_h' H
A, B, C, D = self.ABCD
dhpA, dhpB, dhpC, dhpD = self.get_dhp_ABCD()
return (
- 3 * A * dhpD / (2*D**2.5) - B * dhpD / (2*D**1.5)
- C/D**2 * dhpD
+ dhpC / D + dhpB / np.sqrt(D) + dhpA / D**1.5
)
if hdiff == 2: # del_h'' H
D = self.ABCD[-1]
dhppA, dhppB = self.get_dhpp_AB()
return (D * dhppB + dhppA) / D**1.5
raise NotImplementedError
def get_dh_ABCDtrK(self):
r"""Compute the derivative of A, B, C, D, tr(K) \wrt `h`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
Refer to the definition of `A,B,C,D` in the documentation of _diff().
The terms computed here are:
\f[
\partial_h A = -2(\partial_h s^i) s^j \partial_i s_j
- s^i s^j \partial_h \partial_i s_j
- \frac{1}{2} (\partial_h s^i) (\partial_i g^{kl}) s_k s_l
- \frac{1}{2} s^i (\partial_h \partial_i g^{kl}) s_k s_l
- s^i (\partial_i g^{kl}) s_k \partial_h s_l
\f]
\f[
\partial_h B =
(\partial_h \partial_i g^{ij}) s_j
+ (\partial_i g^{ij}) \partial_h s_j
+ (\partial_h g^{ij}) \partial_i s_j
+ g^{ij} \partial_h \partial_i s_j
+ (\partial_h \partial_i \ln\sqrt{g}) s^i
+ (\partial_i \ln\sqrt{g}) \partial_h s^i
\f]
\f[
\partial_h C =
\big[(\partial_h g^{ik}) g^{jl} + g^{ik}(\partial_h g^{jl})\big]
K_{kl} s_i s_j
+ g^{ik} g^{jl} (\partial_h K_{kl}) s_i s_j
+ 2 g^{ik} g^{jl} K_{kl} s_i \partial_h s_j
\f]
\f[
\partial_h D =
(\partial_h g^{ij}) s_i s_j + 2 g^{ij} s_i \partial_h s_j
\f]
\f[
\partial_h \mathrm{tr}K =
(\partial_h g^{ij}) K_{ij} + g^{ij} \partial_h K_{ij}
\f]
The individual terms are computed by simply applying the chain rule.
We obtain for any quantity `f` which depends on the coordinates
`x,y,z`:
\f[
\partial_h f = (\partial_i f) (\partial_h\gamma)^i,
\f]
where \f$\gamma\f$ is the curve along which the computation takes
place.
"""
dh_gamma = self.curve.h_diff(self.param)
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
dg = self.dg
ddg = self.ddg
ddg_inv = self.ddg_inv
s, s_up, ds = self.s, self.s_up, self.ds
dds = self.compute_dds()
dhs = ds.dot(dh_gamma)
dhg_inv = np.einsum('aij,a', dg_inv, dh_gamma)
dhs_up = dhg_inv.dot(s) + g_inv.dot(dhs)
dhdg_inv = np.einsum('aikl,a', ddg_inv, dh_gamma)
dhds = dds.dot(dh_gamma)
dhdlnsqrtg = (
0.5 * np.einsum('icd,acd,a', dg_inv, dg, dh_gamma)
+ 0.5 * np.einsum('cd,iacd,a', g_inv, ddg, dh_gamma)
)
dhA = (
- 2 * np.einsum('i,j,ij', dhs_up, s_up, ds)
- np.einsum('i,j,ij', s_up, s_up, dhds)
- 0.5 * np.einsum('i,ikl,k,l', dhs_up, dg_inv, s, s)
- 0.5 * np.einsum('i,ikl,k,l', s_up, dhdg_inv, s, s)
- np.einsum('i,ikl,k,l', s_up, dg_inv, s, dhs)
)
dhB = (
np.einsum('iij,j', dhdg_inv, s)
+ np.einsum('iij,j', dg_inv, dhs)
+ dhg_inv.dot(ds).diagonal().sum()
+ g_inv.dot(dhds).diagonal().sum()
+ dhdlnsqrtg.dot(s_up)
+ dlnsqrtg.dot(dhs_up)
)
dhD = (
np.einsum('ij,i,j', dhg_inv, s, s)
+ 2 * np.einsum('ij,i,j', g_inv, s, dhs)
)
if self.K is None:
dhC = 0.0
dhtrK = 0.0
else:
K = self.K
dK = self.curve.extr_curvature(self.point, diff=1)
dhK = np.einsum('aij,a', dK, dh_gamma)
dhC = (
np.einsum('ik,jl,kl,i,j', dhg_inv, g_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, dhg_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, g_inv, dhK, s, s)
+ 2 * np.einsum('ik,jl,kl,i,j', g_inv, g_inv, K, s, dhs)
)
dhtrK = (
np.einsum('ij,ij', dhg_inv, K)
+ np.einsum('ij,ij', g_inv, dhK)
)
return dhA, dhB, dhC, dhD, dhtrK
def get_dhp_ABCD(self):
r"""Compute the derivative of A, B, C, D \wrt `h'`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h'} s_i &=& - X_i\\
\partial_{h'} \partial_i s_j &=& - X_{ij},
\f}
where \f$X_i := \partial_i \lambda\f$ and
\f$X_{ij} := \partial_i \partial_j \lambda\f$.
The terms computed here then become (refer to _diff()):
\f{eqnarray*}{
\partial_{h'} A &=&
2 X^i s^j \partial_i s_j + s^i s^j X_{ij}
+ \frac{1}{2} (\partial_i g^{kl}) (X^i s_k s_l + 2 s^i X_k s_l)
\\
\partial_{h'} B &=&
-(\partial_i g^{ij}) X_j - g^{ij} X_{ij} - (\partial_i\ln\sqrt{g}) X^i
\\
\partial_{h'} C &=& -2 K_{ij} X^i s^j
\\
\partial_{h'} D &=& -2 X_i s^i
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
s, s_up, ds = self.s, self.s_up, self.ds
X, X_up, Y = self.X, self.X_up, self.Y
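        # Here X_i = del_i lambda and Y_ij = del_i del_j lambda (see docstring);
        # the expressions below transcribe del_h' A, B, C, D term by term.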
dhpA = (
2 * ds.dot(X_up).dot(s_up)
+ Y.dot(s_up).dot(s_up)
+ 0.5 * dg_inv.dot(s).dot(s).dot(X_up)
+ dg_inv.dot(X).dot(s).dot(s_up)
)
dhpB = (
- dg_inv.dot(X).diagonal().sum()
- g_inv.dot(Y).diagonal().sum()
- dlnsqrtg.dot(X_up)
)
if self.K is None:
dhpC = 0.0
else:
dhpC = - 2 * self.K.dot(X_up).dot(s_up)
dhpD = - 2 * X.dot(s_up)
return dhpA, dhpB, dhpC, dhpD
def get_dhpp_AB(self):
r"""Compute the derivative of A and B \wrt ``h''``.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h''} s_i &=& 0\\
\partial_{h''} \partial_i s_j &=& - X_i X_j.
\f}
We compute here (see also _diff()):
\f{eqnarray*}{
\partial_{h''} A &=& s^i s^j X_i X_j \\
\partial_{h''} B &=& -X^i X_i \\
\partial_{h''} C &=& \partial_{h''} D = 0
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
X, X_up = self.X, self.X_up
s_up = self.s_up
dhppA = np.outer(X, X).dot(s_up).dot(s_up)
dhppB = - X_up.dot(X)
return dhppA, dhppB
@abstractmethod
def _compute_s_ds_X_Y(self):
r"""Compute the terms we need to compute the expansion.
Subclasses need to interpret the horizon function and compute the
        covariant normal (not normalized), its derivatives, and the first
        (`X = del_i lambda`) and second (`Y = del_i del_j lambda`) derivatives
        of the parameter.
"""
pass
def _compute_dds_Z(self):
r"""Compute second derivatives of the normal and third ones of lambda.
This computes \f$\partial_i\partial_j s_k\f$ and
\f$Z := X_{ijk} = \partial_i\partial_j\partial_k \lambda\f$.
@return Two elements, the first containing the derivatives of the
non-normalized covariant normal `s` and the second those of the
parameter \f$\lambda\f$.
"""
raise NotImplementedError
def _compute_d2_Y(self):
r"""Compute second derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def _compute_d3_Z(self):
r"""Compute third derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def ricci_scalar(self):
r"""Compute the Ricci scalar of the surface represented by the curve.
The Ricci scalar of a 2-surface is defined as (see e.g. [1])
\f$R = q^{AB}R_{AB}\f$, where `q` is the induced metric
\f$q_{ab} = g_{ab} - \nu_a \nu_b\f$, \f$R_{AB}\f$ is the Ricci tensor
\f$R_{AB} = R^C_{\ A\,CB}\f$ and \f$\nu\f$ the covariant outward unit
normal of the surface.
Here, \f$R^A_{\ B\,CD}\f$ is the Riemann tensor.
Note that `A,B` run over the coordinates \f$(\lambda,\varphi)\f$ on
the surface and `a,b` over `x,y,z`.
See induced_metric() for a bit more details on the induced metric `q`
and the coordinate transformation to get the components \f$q_{AB}\f$
we need here.
It is convenient to compute the Ricci scalar from the purely covariant
Riemann tensor \f$R_{AB\,CD} = q_{AE}R^E_{\ B\,CD}\f$ as this is
antisymmetric in the first and last two index pairs, i.e. it has only
one independent component \f$R_{\lambda\varphi\,\lambda\varphi}\f$ in
two dimensions.
A short calculation reveals
\f[
R = q^{AB}R_{AB}
= 2 R_{\lambda\varphi\,\lambda\varphi}
(q^{\lambda\lambda}q^{\varphi\varphi} - (q^{\lambda\varphi})^2).
\f]
@b References
[1] Straumann, Norbert. General relativity. Springer Science &
Business Media, 2004.
"""
R_0101 = self.covariant_riemann()
q_inv = self.induced_metric(inverse=True)
return 2 * R_0101 * (q_inv[0,0]*q_inv[1,1] - q_inv[0,1]**2)
def induced_metric(self, diff=0, inverse=False):
r"""Compute the induced metric on the surface.
This method computes the components of the induced metric in
\f$(\lambda,\varphi)\f$ coordinates as well as the components of the
inverse (i.e. indices upstairs) and derivatives of these components.
Since this class assumes axisymmetry throughout, this method requires
(without loss of generality) that the point at which the metric is to
be returned is located at `phi=0`, i.e. `y=0` and `x>0`.
@param diff
Derivative order to compute. Default is `0`.
@param inverse
Whether to return the (derivatives of the) inverse of the induced
metric. Default is `False`.
@return NumPy array with ``2+diff`` axes, such that the indices
``[A1,A2,...,B,C]`` correspond to
\f$\partial_{A_1}\partial_{A_2}\ldots q_{BC}\f$ for
            `inverse==False` and with upstairs indices for `inverse==True`.
@b Notes
The induced 2-metric `q` on the surface \f$\sigma\f$ is formally given
by
\f[
q = \Pi_\sigma g = g\big|_\sigma - \underline{\nu} \otimes \underline{\nu},
\qquad
q_{ab} = g_{ab} - \nu_a \nu_b,
\f]
where \f$\nu\f$ is the outward pointing normal of \f$\sigma\f$ and
\f$\underline{\nu} = g(\nu,\,\cdot\,)\f$.
The induced metric can easily be expressed in terms of the components
of the 3-metric `g` by expanding these into the cobasis fields of the
coordinates \f$\lambda, \varphi\f$ on the 2-surface (and thereby
dropping any transversal components). As a result, we get the simple
formula
\f[
q_{AB} = g_{ij}\ (\partial_A x^i)\ (\partial_B x^j),
\f]
where `A,B = 1,2` and
\f$(\partial_A) = (\partial_\lambda, \partial_\varphi)\f$.
The derivatives of the Cartesian coordinates `x,y,z` are computed in
diff_xyz_wrt_laph().
From this, we easily get the first and second derivatives by applying
the chain and product rule:
\f{eqnarray*}{
\partial_A q_{CD} &=&
(\partial_A g_{ij}) x_C^i x_D^j
+ g_{ij} (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\
\partial_A\partial_B q_{CD} &=&
(\partial_A\partial_B g_{ij}) x_C^i x_D^j
+ (\partial_A g_{ij}) (x_{CB}^i x_D^j + x_C^i x_{DB}^j)
+ (\partial_B g_{ij}) (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\&&
+ g_{ij} (x_{CAB}^i x_D^j + x_{CA}^i x_{DB}^j
+ x_{CB}^i x_{DA}^j + x_C^i x_{DAB}^j).
\f}
Here, \f$x_{A}^i := \partial_A x^i\f$, etc.
"""
return self._induced_metric(diff, bool(inverse))
@cache_method_results()
def _induced_metric(self, diff, inverse):
if inverse:
q = self.induced_metric(diff=0)
if diff == 0:
return linalg.inv(q)
dq = self.induced_metric(diff=1)
if diff == 1:
dq_inv = inverse_2x2_matrix_derivative(q, dq, diff=1)
return dq_inv
ddq = self.induced_metric(diff=2)
if diff == 2:
ddq_inv = inverse_2x2_matrix_derivative(q, dq, ddq, diff=2)
return ddq_inv
raise NotImplementedError
dx = self.diff_xyz_wrt_laph(diff=1)
g = self.g.mat
if diff == 0:
q = np.einsum('ij,ai,bj', g, dx, dx)
return q
ddx = self.diff_xyz_wrt_laph(diff=2)
dg = self.dg
dg_laph = np.einsum('ak,kij', dx, dg)
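        # Chain rule: dg_laph[A,i,j] = (del_A x^k)(del_k g_ij), i.e. the metric
        # derivatives along the surface coordinates (lambda, phi).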
if diff == 1:
dq = (
np.einsum('aij,bi,cj', dg_laph, dx, dx)
+ np.einsum('ij,bai,cj', g, ddx, dx)
+ np.einsum('ij,bi,caj', g, dx, ddx)
)
return dq
d3x = self.diff_xyz_wrt_laph(diff=3)
ddg = self.ddg
ddg_laph = (
np.einsum('abk,kij', ddx, dg)
+ np.einsum('ak,bl,klij', dx, dx, ddg)
)
ddq = (
np.einsum('abij,ci,dj', ddg_laph, dx, dx)
+ np.einsum('aij,cbi,dj', dg_laph, ddx, dx)
+ np.einsum('aij,ci,dbj', dg_laph, dx, ddx)
+ np.einsum('bij,cai,dj', dg_laph, ddx, dx)
+ np.einsum('bij,ci,daj', dg_laph, dx, ddx)
+ np.einsum('ij,cabi,dj', g, d3x, dx)
+ np.einsum('ij,cai,dbj', g, ddx, ddx)
+ np.einsum('ij,cbi,daj', g, ddx, ddx)
+ np.einsum('ij,ci,dabj', g, dx, d3x)
)
if diff == 2:
return ddq
raise NotImplementedError
def diff_xyz_wrt_laph(self, diff=1):
r"""Compute derivatives of x,y,z \wrt lambda and phi.
This computes the derivatives of the Cartesian coordinates `x,y,z`
w.r.t. the surface intrinsic coordinates `lambda` and `phi` based on
the usual transform rules
\f{eqnarray*}{
x = \rho(\lambda)\cos\varphi,\quad
y = \rho(\lambda)\sin\varphi,\quad
z = z(\lambda),
\f}
where \f$\rho\f$ is the `x`-component of the curve and `z` its
`z`-component. The results are evaluated at \f$\varphi = 0\f$.
@return For ``diff==1``, return the first derivatives with indices
``dx[A,i]`` meaning \f$\partial_A x^i\f$, where we have
\f$(x^i) := (x,y,z)\f$ and
\f$(\partial_A) := (\partial_\lambda, \partial_\varphi)\f$.
For ``diff==2``, second derivatives are returned with indices
``ddx[A,B,i]`` meaning \f$\partial_A\partial_B x^i\f$.
The same pattern holds for ``diff==3``.
If ``diff==None``, a list ``[dx, ddx, dddx]`` is returned.
@param diff
Derivative order. One of `1`, `2`, `3`. Default is `1`.
            If explicitly set to None, all three implemented orders are
returned.
"""
# Here we'll call r==rho and dr==\partial_lambda rho,
# l==lambda, p==phi, etc.
results = []
r, _ = self.curve(self.param, xyz=False)
dr, dz = self.curve.diff(self.param, diff=1)
if diff is None or diff == 1:
dx = np.array([
[dr, 0., dz], # partial_lambda (x,y,z)
[0., r, 0.], # partial_phi (x,y,z)
])
if diff == 1:
return dx
results.append(dx)
ddr, ddz = self.curve.diff(self.param, diff=2)
if diff is None or diff == 2:
dll = [ddr, 0., ddz]
dlp = [0., dr, 0.]
dpp = [-r, 0., 0.]
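            # Second derivatives of (x, y, z) w.r.t. (lambda, phi) at phi=0,
            # e.g. d2x/dphi2 = -rho*cos(phi) -> -r and
            # d2y/(dlambda dphi) = rho'*cos(phi) -> dr.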
ddx = np.array([
[dll, dlp],
[dlp, dpp],
])
if diff == 2:
return ddx
results.append(ddx)
d3r, d3z = self.curve.diff(self.param, diff=3)
if diff is None or diff == 3:
dlll = [d3r, 0., d3z]
dllp = [0., ddr, 0.]
dlpp = [-dr, 0., 0.]
dppp = [0., -r, 0.]
dddx = np.array([
[[dlll, dllp],
[dllp, dlpp]],
[[dllp, dlpp],
[dlpp, dppp]],
])
if diff == 3:
return dddx
results.append(dddx)
if diff is None:
return results
raise ValueError("Unknown derivative order: %s" % diff)
def covariant_normal(self, diff=0):
r"""Compute (derivatives of) the normalized covariant normal.
@param diff
Derivative order to compute. Default is `0`.
@return NumPy `ndarray` with ``diff+1`` axes and indices
``i1,i2,...,k`` corresponding to
\f$\partial_{i_1}\partial_{i_2}\ldots\nu_k\f$. For example, for
``diff==0``, returns the three components of `nu`.
@b Notes
Given the non-normalized components \f$s_i\f$ of the covariant outward
pointing normal on the surface, we compute
\f[
\nu_i = \frac{s_i}{\sqrt{D}}, \qquad D := g^{kl} s_k s_l.
\f]
From this formula, we get the x,y,z derivatives
\f[
\partial_i\nu_j =
\frac{\partial_i s_j}{\sqrt{D}}
- \frac{s_j}{2 D^{3/2}} D_i
\f]
and
\f[
\partial_i\partial_j\nu_k =
\frac{\partial_i \partial_j s_k}{\sqrt{D}}
- \frac{1}{2 D^{3/2}}
\Big(
(\partial_j s_k) D_i
+ (\partial_i s_k) D_j
+ s_k D_{ij}
\Big)
+ \frac{3}{4} \frac{s_k}{D^{5/2}} D_i D_j,
\f]
where
\f{eqnarray*}{
D_i &:=& \partial_i D
= (\partial_i g^{kl}) s_k s_l + 2 g^{kl} s_k\,\partial_i s_l
\\
D_{ij} &:=& \partial_i\partial_j D \\
&=&
(\partial_i \partial_j g^{kl}) s_k s_l
+ 2 (\partial_i g^{kl}) s_k\,\partial_j s_l
+ 2 (\partial_j g^{kl}) s_k\,\partial_i s_l
\\&&
+ 2 g^{kl} \big(
(\partial_j s_k)(\partial_i s_l)
+ s_k \partial_i \partial_j s_l
\big).
\f}
"""
return self._covariant_normal(diff)
@cache_method_results()
def _covariant_normal(self, diff):
r"""Cached implementation of covariant_normal()."""
s = self.s
D = self.ABCD[3]
if diff == 0:
return s / np.sqrt(D)
ds = self.ds
dg_inv = self.dg_inv
g_inv = self.g.inv
if diff == 1:
            # note: X.dot(y) for an n-d X and 1-d y contracts/sums the *last*
# index of X with y, i.e. X.dot(y) = sum_l X_ijkl y^l.
# This means X.dot(y) has n-1 free indices left.
# We now compute partial_i nu_j (note the indices i and j).
dnx, dny, dnz = [
ds[:,j] / np.sqrt(D) - 0.5 * (
s[j]/D**1.5 * np.array(
[dg_inv[i].dot(s).dot(s) + 2*g_inv.dot(s).dot(ds[i,:])
for i in range(3)]
)
)
for j in range(3)
]
return np.array([dnx, dny, dnz]).T
dds = self.compute_dds()
Di = self.compute_Di()
Dij = self.compute_Dij()
if diff == 2:
# We now compute partial_i partial_j nu_k.
ddnx, ddny, ddnz = [
dds[:,:,k] / np.sqrt(D)
- 1/(2*D**1.5) * (
np.outer(ds[:,k], Di) + np.outer(Di, ds[:,k]) + s[k] * Dij
)
+ 3./4. * s[k] / D**2.5 * np.outer(Di, Di)
for k in range(3)
]
return np.array([ddnx, ddny, ddnz]).T # partial derivs. commute
raise NotImplementedError
def compute_Di(self):
r"""Compute the D_i terms for covariant_normal().
See covariant_normal() for the derivation of the used formulas.
"""
g_inv = self.g.inv
dg_inv = self.dg_inv
s = self.s
ds = self.ds
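        # D_i = (del_i g^{kl}) s_k s_l + 2 s^l del_i s_l (cf. covariant_normal());
        # each dot() below contracts the trailing axis of its left operand.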
return dg_inv.dot(s).dot(s) + 2 * ds.dot(g_inv.dot(s))
def compute_Dij(self):
r"""Compute the D_ij terms for covariant_normal().
See covariant_normal() for the derivation of the used formulas.
"""
g_inv = self.g.inv
dg_inv = self.dg_inv
ddg_inv = np.asarray(self.metric.diff(self.point, diff=2, inverse=True))
s = self.s
ds = self.ds
dds = self.compute_dds()
return (
ddg_inv.dot(s).dot(s)
+ 2 * dg_inv.dot(s).dot(ds)
+ 2 * dg_inv.dot(s).dot(ds).T
+ 2 * g_inv.dot(ds).T.dot(ds) + 2 * dds.dot(g_inv.dot(s))
)
@cache_method_results()
def compute_dds(self):
r"""Compute the second derivatives of the non-normalized normal."""
return self._compute_dds_Z()[0]
@cache_method_results()
def compute_d2_Y(self):
r"""Compute second derivatives of xi and lambda \wrt x,y,z."""
return self._compute_d2_Y()
@cache_method_results()
def compute_d3_Z(self):
r"""Compute third derivatives of xi and lambda \wrt x,y,z."""
return self._compute_d3_Z()
def covariant_riemann(self):
r"""Compute the purely covariant Riemann tensor.
This computes the only independent component
\f[
R_{\lambda\varphi\,\lambda\varphi}
= q_{\lambda A} R^A_{\ \varphi\,\lambda\varphi}
\f]
of the covariant Riemann tensor.
"""
q = self.induced_metric()
R0_101, R1_101 = self.riemann()
R_0101 = q[0,0] * R0_101 + q[0,1] * R1_101
return R_0101
def riemann(self):
r"""Compute the components of the Riemann tensor on the surface.
The Riemann tensor computed here is defined as
\f[
R^A_{\ B\,CD} =
\partial_C \Gamma^A_{DB}
- \partial_D \Gamma^A_{CB}
+ \Gamma^A_{CE} \Gamma^E_{DB}
- \Gamma^A_{DE} \Gamma^E_{CB},
\f]
where \f$\Gamma^{A}_{BC}\f$ are the Christoffel symbols of the induced
2-metric `q`.
Due to the antisymmetry in the last two indices, only two components
may potentially be nonzero, namely
        \f$R^\lambda_{\ \varphi\,\lambda\varphi}\f$ and
        \f$R^\varphi_{\ \varphi\,\lambda\varphi}\f$. These two components are
returned here.
"""
G = self.christoffel()
dG = self.christoffel_deriv()
R0_101 = riemann_components(G, dG, 0, 1, 0, 1)
R1_101 = riemann_components(G, dG, 1, 1, 0, 1)
return R0_101, R1_101
def christoffel(self):
r"""Compute the Christoffel symbols of the induced metric on the surface.
@return NumPy array with indices `[A,B,C]` corresponding to
\f$\Gamma^A_{BC}\f$.
"""
q_inv = self.induced_metric(inverse=True)
dq = self.induced_metric(diff=1)
return christoffel_symbols(q_inv, dq)
def christoffel_deriv(self):
r"""Compute the derivatives of the Christoffel symbols on the surface.
@return NumPy array with indices `[A,B,C,D]` corresponding to
\f$\partial_A\Gamma^B_{CD}\f$.
"""
q_inv = self.induced_metric(inverse=True)
dq_inv = self.induced_metric(inverse=True, diff=1)
dq = self.induced_metric(diff=1)
ddq = self.induced_metric(diff=2)
return christoffel_deriv(q_inv, dq_inv, dq, ddq)
def extrinsic_curvature(self, trace=False, square=False):
r"""Compute the extrinsic curvature.
@param trace
If `True`, returns the trace of the extrinsic curvature. Default
is `False`. May not be used together with `square`.
@param square
If `True`, returns the square \f$k_{AB} k^{AB}\f$. Default is
`False`. May not be used together with `trace`.
@return If ``trace=square=False``, a NumPy 2x2 array containing the
components of `k_AB`. Otherwise, returns a float.
@b Notes
To get the components \f$k_{AB} = -\nabla_A \nu_B\f$, first note that
`k` annihilates any components transverse to the surface \f$\sigma\f$
(see e.g. [1]), i.e. for any point \f$p \in \sigma\f$
\f[
k(v_p, X_p) = 0
\qquad \forall\,X_p\in T_p M,
\f]
where \f$v\f$ is any vector field normal to \f$\sigma\f$, for example
the normal \f$\nu\f$ in the current slice \f$\Sigma\f$ or the future
pointing normal `n` of the slice in spacetime.
Hence, we will in the following restrict all objects to \f$\sigma\f$.
For example,
\f[
dx^\mu\big|_\sigma = \frac{\partial x^\mu}{\partial u^A}\ du^A
=: x^\mu_{,A}\ du^A,
\f]
where \f$u^A = \lambda,\varphi\f$. The \f$x^\mu_{,A}\f$ are computed
in diff_xyz_wrt_laph(). Note that \f$x^0_{,A} = 0\f$ since
\f$x^0 = t\f$ does not depend on \f$\lambda\f$ or \f$\varphi\f$.
Observing further that \f$\partial_A = x^a_{,A}\partial_a\f$, we get
\f{eqnarray*}{
\nabla_{\!\partial_A} \nu_B
&=& \big[x^a_{,A} \nabla_a \underline\nu\big]_B
\\ &=& x^a_{,A} \big[
(\partial_a\nu_\beta - \Gamma^\alpha_{a\beta}\nu_\alpha)\ dx^\beta
\big]_B
\\ &=& x^a_{,A} \big[
(\partial_a\nu_b - \Gamma^c_{ab}\nu_c)\ dx^b
\big]_B
\\ &=& x^a_{,A} (\partial_a\nu_b - \Gamma^c_{ab}\nu_c) x^b_{,B}
\\ &=& x^a_{,A} x^b_{,B}
(\partial_a\nu_b - {}^{(3)}\Gamma^c_{ab}\nu_c).
\f}
The third equality is due to \f$dx^0\big|_\sigma = 0\f$ and
\f$\nu_0 = 0\f$. The reason we can take the Christoffel symbols of the
3-metric in the slice is that, by their definition and using
\f$g_{ab} = {}^{(4)}g_{ab} + n_a n_b\f$,
\f{eqnarray*}{
(\Gamma^k_{ab} - {}^{(3)}\Gamma^k_{ab}) \nu_k
&=& \frac{1}{2} (g^{kc} - n^k n^c) \big[
- \partial_c (g_{ab} - n_a n_b)
+ \partial_a (g_{bc} - n_b n_c)
+ \partial_b (g_{ca} - n_c n_a)
\big] \nu_k
\\&&
- \frac{1}{2} g^{kc} \big[
- \partial_c g_{ab}
+ \partial_a g_{bc}
+ \partial_b g_{ca}
\big] \nu_k
\\ &=& \frac{1}{2} \nu^c \big[
\partial_c (n_a n_b)
- \partial_a (n_b n_c)
- \partial_b (n_c n_a)
\big]
\\ &=& 0.
\f}
The first equality is due to \f$n^k \nu_k = 0\f$ (`n` is orthogonal to
any horizontal vectors, i.e. in \f$T_p\Sigma\f$) and the last
        equality is due to \f$n_\mu = 0\f$ for \f$\mu \neq 0\f$.
@b References
[1] D. Giulini. "Dynamical and Hamiltonian Formulation of General
Relativity". In: Springer handbook of spacetime. Ed. by A.
Ashtekar and V. Petkov. Springer, 2014. Chap. 17.
"""
if trace and square:
raise ValueError("Arguments `trace` and `square` are mutually exclusive.")
ra2 = range(2)
ra3 = range(3)
G3 = self.metric.christoffel(self.point)
nu = self.covariant_normal(diff=0)
dn = self.covariant_normal(diff=1) # i,j -> del_i nu_j
dx = self.diff_xyz_wrt_laph(diff=1) # shape=(2,3), A,i -> del_A x^i
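        # _K(A, B) transcribes k_AB = -x^a_{,A} x^b_{,B} (del_a nu_b - Gamma^c_{ab} nu_c)
        # from the docstring, with G3 the Christoffel symbols of the 3-metric.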
def _K(A, B):
return - (
fsum(dx[A,i]*dx[B,j] * dn[i,j]
for j in ra3 for i in ra3)
- fsum(dx[A,i]*dx[B,j] * G3[k,i,j]*nu[k]
for k in ra3 for j in ra3 for i in ra3)
)
K_AB = np.array([[_K(A,B) for B in ra2] for A in ra2])
if trace or square:
q_inv = self.induced_metric(inverse=True)
if trace:
return q_inv.dot(K_AB).diagonal().sum()
return np.einsum('ac,bd,ab,cd', q_inv, q_inv, K_AB, K_AB)
return K_AB
class _FuncVariation(object):
r"""Helper class to apply an offset to a specific derivative of a function.
Given a function `f`, an offset `eps` is applied to the n'th derivative of
the function. Here `n` is given by the `diff` parameter.
This is used to compute the finite difference approximation of the
derivative of the expansion w.r.t. \f$ h \f$, \f$ h' \f$, and \f$ h'' \f$.
"""
def __init__(self, f, diff, eps=0):
r"""Create a callable and modify one derivative order.
Args:
f: Callable that should also implement `f.diff()`, e.g. an
evaluator of the motsfinder.exprs system.
diff: Derivative order of `f` to modify. `0` means that `eps` will
be added to any function value computed by `f` but not to
derivatives. A value of ``n>0`` means that `f` and all its
derivatives are returned "as is", except for the n'th
derivative to which the value of `eps` will be added.
eps: Value to add to the results of computing the `diff`'th
derivative of `f`.
"""
## The function to wrap.
self._f = f
## The derivative order of the function to add `eps` to.
self._diff = diff
## The value to add to the specified derivative order.
self.eps = eps
def __call__(self, x):
r"""Evaluate the function at a point.
        If `diff==0`, `eps` is added to the returned value.
"""
val = self._f(x)
if self._diff == 0:
val += self.eps
return val
def diff(self, x, n=1):
r"""Evaluate the n'the derivaative of the function at a point.
In case `diff==n`, the `eps` will be added.
"""
val = self._f.diff(x, n=n)
if self._diff == n:
val += self.eps
return val
| [
"[email protected]"
] | |
90d887816136ef7ea406db5120f7ddfd8554e2c9 | f58a1dcae97115b566409704dcf1a46a5f86df47 | /Bellevue University/Courses/DSC640/Matplotlib for python Developers/7900_Code/Chapter 03/7900_03_15.py | 20b070bc387075f7ddde931334627adb0d70a5ca | [] | no_license | safarie1103/Safarie1103 | 318519ace23c33fcf6d36337392156e5381abd49 | a86172bfc47eff0af65285b641af0ad26e13fd12 | refs/heads/master | 2023-06-13T01:43:35.761325 | 2023-06-07T16:01:16 | 2023-06-07T16:01:16 | 205,732,823 | 0 | 1 | null | 2022-11-28T15:55:13 | 2019-09-01T21:11:38 | null | UTF-8 | Python | false | false | 169 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
plt.figure(figsize=(3,3))
x = [45, 35, 20]
labels = ['Cats', 'Dogs', 'Fishes']
plt.pie(x, labels = labels)
plt.show() | [
"[email protected]"
] | |
f5e1b29ce42118842f5d23bc27518c4a367946ef | 8585e7b3bbb71218fcb4dcb8fb99b46f6973ed72 | /healthack/construct_dirs.py | 9d57d47edf4a4cc7a436084333b5f4f8b8baa03f | [] | no_license | koike-ya/health | eeed56a8940d1c30333069a2ab339bb6d5937118 | 87bd1842d49e34abef8c66f666b6526d3fb18522 | refs/heads/master | 2021-10-11T13:59:19.625847 | 2019-01-27T08:55:09 | 2019-01-27T08:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from pathlib import Path
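# Scaffold a standard data-science project layout (config, data, notebooks,
# models, src, reports, logs) under the current working directory;
# exist_ok=True avoids errors when the files or directories already exist.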
Path(Path.cwd() / "setup.py").touch(exist_ok=True)
Path(Path.cwd() / "config").mkdir(exist_ok=True)
Path(Path.cwd() / "config" / "const.py").touch(exist_ok=True)
Path(Path.cwd() / "notebooks").mkdir(exist_ok=True)
Path(Path.cwd() / "data" / "processed").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "models").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "data" / "raw").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "src" / "features").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "models").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "visualization").mkdir(exist_ok=True)
Path(Path.cwd() / "reports" / "figures").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "reports" / "results").mkdir(exist_ok=True)
Path(Path.cwd() / "logs").mkdir(exist_ok=True)
| [
"[email protected]"
] | |
f26b1b6ad59d2ffe788cde505e0b664cb9acd360 | 06f036994ccb76d93cfc2a7ae9fc3d5c221724ee | /qmla/analysis/outputs.py | 4b778feb0d5864237e030dc64637e88c4d4e65fd | [
"MIT"
] | permissive | flynnbr11/QMLA | 13b43b7789632ca18306b8818fd90b702cc139eb | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | refs/heads/master | 2023-05-04T04:58:28.461900 | 2021-10-27T22:10:35 | 2021-10-27T22:10:35 | 95,233,038 | 15 | 11 | MIT | 2023-03-31T14:59:08 | 2017-06-23T15:33:06 | Python | UTF-8 | Python | false | false | 15,532 | py | import os
import sys
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from scipy import stats
import lfig
__all__ = ["plot_dynamics_multiple_models"]
def format_exponent(n):
a = "%E" % n
val = a.split("E")[0].rstrip("0").rstrip(".")
val = np.round(float(val), 2)
exponent = a.split("E")[1]
return str(val) + "E" + exponent
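# Illustrative example (not part of the original code):
# format_exponent(0.000123456) returns '1.23E-04'.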
def plot_dynamics_multiple_models(
directory_name,
results_path,
results_file_name_start="results",
use_experimental_data=False,
dataset=None,
true_expectation_value_path=None,
probes_plot_file=None,
exploration_rule=None,
unique_exploration_classes=None,
top_number_models=2,
save_true_expec_vals_alone_plot=True,
collective_analysis_pickle_file=None,
return_results=False,
save_to_file=None,
figure_format="png",
):
r"""
Plots reproduced dynamics against time
for the top models, i.e. those which win the most.
TODO: refactor this code - it should not need to unpickle
all the files which have already been unpickled and stored in the summary
results CSV.
TODO: this is a very old method and can surely be improved using Pandas dataframes now stored.
:param directory_name: path to directory where results .p files are stored.
:param results_path: path to CSV with all results for this run.
:param results_file_name_start:
:param use_experimental_data: bool, whether experimental (fixed) data was used.
:param true_expectation_value_path: path to file containing pre-computed expectation
values.
:param probes_plot_file: path to file with specific probes (states) to use
for plotting purposes for consistency.
:param exploration_rule: the name of the exploration strategy used.
:param unique_exploration_classes: dict with single instance of each exploration strategy class
used in this run.
:param top_number_models: Number of models to compute averages for
(top by number of instance wins).
:param true_params_dict: dict with true parameter for each parameter in the
true model.
:param save_true_expec_vals_alone_plot: bool, whether to save a
separate plot only of true expectation values, in addition
to reproduced dynamics.
:param collective_analysis_pickle_file: if not None, store analysed data
to this path.
:param return_results: bool, to return the analysed data upon function call.
:param save_to_file: if not None, path to save PNG.
:returns None:
"""
plt.switch_backend("agg")
# results = pd.DataFrame.from_csv(
results = pd.read_csv(results_path, index_col="QID")
all_winning_models = list(results.loc[:, "NameAlphabetical"])
def rank_models(n):
return sorted(set(n), key=n.count)[::-1]
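    # rank_models orders the distinct model names by how many instances each
    # won, most frequent first.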
# from
# https://codegolf.stackexchange.com/questions/17287/sort-the-distinct-elements-of-a-list-in-descending-order-by-frequency
if len(all_winning_models) > top_number_models:
winning_models = rank_models(all_winning_models)[0:top_number_models]
else:
winning_models = list(set(all_winning_models))
cm_subsection = np.linspace(0, 0.8, len(winning_models))
colours = [cm.viridis(x) for x in cm_subsection]
experimental_measurements = pickle.load(
open(str(true_expectation_value_path), "rb")
)
expectation_values_by_name = {}
os.chdir(directory_name)
pickled_files = []
for file in os.listdir(directory_name):
# if file.endswith(".p") and file.startswith("results"):
if file.endswith(".p") and file.startswith(results_file_name_start):
pickled_files.append(file)
num_results_files = len(pickled_files)
exploration_strategies = {}
for f in pickled_files:
fname = directory_name + "/" + str(f)
result = pickle.load(open(fname, "rb"))
alph = result["NameAlphabetical"]
expec_values = result["ExpectationValues"]
if alph in expectation_values_by_name.keys():
expectation_values_by_name[alph].append(expec_values)
else:
expectation_values_by_name[alph] = [expec_values]
if alph not in list(exploration_strategies.keys()):
exploration_strategies[alph] = result["ExplorationRule"]
exploration_classes = {}
for g in list(exploration_strategies.keys()):
try:
exploration_classes[g] = unique_exploration_classes[
exploration_strategies[g]
]
except BaseException:
exploration_classes[g] = None
try:
true_model = unique_exploration_classes[exploration_rule].true_model
except BaseException:
print(
"Couldn't find exploration strategy of {} in \n {}".format(
exploration_rule, unique_exploration_classes
)
)
raise
collect_expectation_values = {
"means": {},
"medians": {},
"true": {},
"mean_std_dev": {},
"success_rate": {},
"r_squared": {},
}
success_rate_by_term = {}
nmod = len(winning_models)
if nmod == 1:
lf = lfig.LatexFigure(
auto_label=False,
)
else:
ncols = int(np.ceil(np.sqrt(nmod)))
nrows = int(np.ceil(nmod / ncols)) + 1 # 1 extra row for "master"
lf = lfig.LatexFigure(auto_label=False, gridspec_layout=(nrows, ncols))
axes_so_far = 1
full_plot_axis = lf.new_axis(force_position=(0, 0), span=(1, "all"))
model_statistics = {}
for term in winning_models:
expectation_values = {}
num_sets_of_this_name = len(expectation_values_by_name[term])
for i in range(num_sets_of_this_name):
learned_expectation_values = expectation_values_by_name[term][i]
for t in list(experimental_measurements.keys()):
try:
expectation_values[t].append(learned_expectation_values[t])
except BaseException:
try:
expectation_values[t] = [learned_expectation_values[t]]
except BaseException:
# if t can't be found, move on
pass
means = {}
std_dev = {}
true = {}
t_values = {}
lower_iqr_expectation_values = {}
higher_iqr_expectation_values = {}
# times = sorted(list(experimental_measurements.keys()))
true_times = sorted(list(expectation_values.keys()))
times = sorted(list(expectation_values.keys()))
times = [np.round(t, 2) if t > 0.1 else t for t in times]
flag = True
one_sample = True
for t in times:
means[t] = np.mean(expectation_values[t])
std_dev[t] = np.std(expectation_values[t])
lower_iqr_expectation_values[t] = np.percentile(expectation_values[t], 25)
higher_iqr_expectation_values[t] = np.percentile(expectation_values[t], 75)
true[t] = experimental_measurements[t]
if num_sets_of_this_name > 1:
expec_values_array = np.array([[i] for i in expectation_values[t]])
# print("shape going into ttest:", np.shape(true_expec_values_array))
if use_experimental_data == True:
t_val = stats.ttest_1samp(
expec_values_array, # list of expec vals for this t
true[t], # true expec val of t
axis=0,
nan_policy="omit",
)
else:
true_dist = stats.norm.rvs(
loc=true[t], scale=0.001, size=np.shape(expec_values_array)
)
t_val = stats.ttest_ind(
expec_values_array, # list of expec vals for this t
true_dist, # true expec val of t
axis=0,
nan_policy="omit",
)
if np.isnan(float(t_val[1])) == False:
# t_values[t] = 1-t_val[1]
t_values[t] = t_val[1]
else:
print("t_val is nan for t=", t)
true_exp = [true[t] for t in times]
# TODO should this be the number of times this model won???
num_runs = num_sets_of_this_name
success_rate = 0
for t in times:
true_likelihood = true[t]
mean = means[t]
std = std_dev[t]
credible_region = (2 / np.sqrt(num_runs)) * std
if (true_likelihood < (mean + credible_region)) and (
true_likelihood > (mean - credible_region)
):
success_rate += 1 / len(times)
mean_exp = np.array([means[t] for t in times])
std_dev_exp = np.array([std_dev[t] for t in times])
lower_iqr_exp = np.array([lower_iqr_expectation_values[t] for t in times])
higher_iqr_exp = np.array([higher_iqr_expectation_values[t] for t in times])
residuals = (mean_exp - true_exp) ** 2
sum_residuals = np.sum(residuals)
mean_true_val = np.mean(true_exp)
true_mean_minus_val = (true_exp - mean_true_val) ** 2
sum_of_squares = np.sum(true_mean_minus_val)
if sum_of_squares != 0:
final_r_squared = 1 - sum_residuals / sum_of_squares
else:
print("[multiQMD plots] sum of squares 0")
final_r_squared = -100
# R^2 for interquartile range
lower_iqr_sum_residuals = np.sum((lower_iqr_exp - true_exp) ** 2)
lower_iqr_sum_of_squares = np.sum((lower_iqr_exp - np.mean(lower_iqr_exp)) ** 2)
lower_iqr_r_sq = 1 - (lower_iqr_sum_residuals / lower_iqr_sum_of_squares)
higher_iqr_sum_residuals = np.sum((higher_iqr_exp - true_exp) ** 2)
higher_iqr_sum_of_squares = np.sum(
(higher_iqr_exp - np.mean(higher_iqr_exp)) ** 2
)
higher_iqr_r_sq = 1 - (higher_iqr_sum_residuals / higher_iqr_sum_of_squares)
name = exploration_classes[term].latex_name(term)
description = r"{}".format(name)
if term == true_model:
description += " (= $\hat{{H}}_0$)"
description_w_bayes_t_value = str(
name
+ " : "
+ str(round(success_rate, 2))
+ " ("
+ str(num_sets_of_this_name)
+ ")."
)
collect_expectation_values["means"][name] = mean_exp
collect_expectation_values["mean_std_dev"][name] = std_dev_exp
collect_expectation_values["success_rate"][name] = success_rate
model_statistics[name] = {
"r_squared_median_exp_val": final_r_squared,
"mean_expectation_values": mean_exp,
"mean_std_dev": std_dev_exp,
"success_rate_t_test": success_rate,
"num_wins": num_sets_of_this_name,
"win_percentage": int(100 * num_sets_of_this_name / num_results_files),
"num_instances": num_results_files,
"lower_iqr_exp_val": lower_iqr_exp,
"higher_iqr_exp_val": higher_iqr_exp,
"lower_iqr_r_sq": lower_iqr_r_sq,
"higher_iqr_r_sq": higher_iqr_r_sq,
"times": times,
}
if nmod > 1:
ax = lf.new_axis()
ax.plot(
times,
mean_exp,
c=colours[winning_models.index(term)],
label=description,
)
ax.fill_between(
times,
mean_exp - std_dev_exp,
mean_exp + std_dev_exp,
alpha=0.2,
facecolor=colours[winning_models.index(term)],
)
ax.set_ylim(0, 1)
ax.set_xlim(0, max(times))
success_rate_by_term[term] = success_rate
ax.set_title("Mean Expectation Values")
ax.scatter(times, true_exp, color="r", s=5, label="System")
ax.plot(times, true_exp, color="r", alpha=0.3)
ax.set_yticks([0, 0.5, 1.0])
ax.set_title(description)
# Add this model to "master" plot
high_level_label = str(name)
if term == true_model:
high_level_label += " (= $\hat{{H}}_0$)"
full_plot_axis.plot(
times,
mean_exp,
c=colours[winning_models.index(term)],
label=high_level_label,
)
full_plot_axis.scatter(times, true_exp, color="r", s=5, label="System")
full_plot_axis.plot(times, true_exp, color="r", alpha=0.3)
full_plot_axis.legend(
ncol=5,
)
full_plot_axis.set_ylim(0, 1.25)
full_plot_axis.set_yticks([0, 0.5, 1.0])
full_plot_axis.set_xlim(0, max(times))
if nmod > 1:
lf.fig.text(0.45, -0.04, "Time", ha="center")
lf.fig.text(-0.04, 0.5, "Expectation Value", va="center", rotation="vertical")
else:
full_plot_axis.set_ylabel("Expectation value")
full_plot_axis.set_xlabel("Time (a.u)")
if save_to_file is not None:
lf.fig.suptitle("Dynamics of trained models")
lf.save(save_to_file, file_format=figure_format)
# Also save an image of the only the system dynamics
if save_true_expec_vals_alone_plot == True and save_to_file is not None:
lf = lfig.LatexFigure(fraction=0.75, auto_label=False)
ax = lf.new_axis()
ax.plot(
times,
true_exp,
marker="o",
color="r",
label="System"
# alpha = 0.3
)
ax.set_xlabel("Time")
ax.set_ylabel("Expectation Value")
ax.legend()
true_only_fig_file = str(save_to_file + "_system")
ax.set_title("True model dynamics")
lf.save(true_only_fig_file, file_format=figure_format)
# add the combined analysis dict
collect_expectation_values["times"] = true_times
collect_expectation_values["true"] = true_exp
if collective_analysis_pickle_file is not None:
if os.path.isfile(collective_analysis_pickle_file) is False:
pickle.dump(model_statistics, open(collective_analysis_pickle_file, "wb"))
else:
# load current analysis dict, add to it and rewrite it.
combined_analysis = pickle.load(open(collective_analysis_pickle_file, "rb"))
for model in model_statistics.keys():
new_keys = list(model_statistics[model].keys())
for key in new_keys:
combined_analysis[model][key] = model_statistics[model][key]
pickle.dump(combined_analysis, open(collective_analysis_pickle_file, "wb"))
else:
print("[analyse] collective analysis path:", collective_analysis_pickle_file)
if return_results == True:
expectation_values_by_latex_name = {}
for term in winning_models:
latex_name = unique_exploration_classes[exploration_rule].latex_name(term)
expectation_values_by_latex_name[latex_name] = expectation_values_by_name[
term
]
return (
times,
mean_exp,
std_dev_exp,
winning_models,
term,
true,
description,
expectation_values_by_latex_name,
expectation_values_by_name,
)
| [
"[email protected]"
] | |
e0c7b019890ee9b53c7d4fa7a809375a21ccfc2b | 4864e58bb9ac93c34f2988f50bec143fbe7b5278 | /blog/migrations/0019_auto_20210114_1845.py | db6d08a76a1bd9374d50ba620e0ae0600f5031ca | [] | no_license | Subhrans/Blog_App | 7e536b868645c5ffc0a35a4a63b206ddd5ab0965 | a81b4adeb8c0cb3bea5ffa85c6f1e2954c23e54a | refs/heads/main | 2023-04-21T05:25:09.113818 | 2021-05-08T18:23:38 | 2021-05-08T18:23:38 | 327,252,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | # Generated by Django 3.0.1 on 2021-01-14 13:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20210114_0331'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='like',
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('like_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.UserProfile')),
('like_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
| [
"[email protected]"
] | |
590d8055aec2d9c89103005c1b857abc0f8fab1d | 7d4a3504bb9daa2589e2580de21e0e15c334787c | /tst/select/select_suite.py | 1226f95721a4f90fecbad8c9e3c507bf770c10a1 | [
"MIT"
] | permissive | ericwu/pumbaa | 4cfac76c29dc35223a3df1a89dbde2f6bbf44719 | e355c0a9ec28cfcfa3daabea9ba4c7cb55907efb | refs/heads/master | 2021-01-13T16:53:42.408105 | 2017-01-23T18:31:04 | 2017-01-23T18:31:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | #
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016, Erik Moqvist
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
import select
import board
from sync import Event, Queue
from drivers import Can, Uart
import harness
from harness import assert_raises
def test_help():
poll = select.poll()
help(select)
help(poll)
def test_register_unregister():
poll = select.poll()
queue = Queue()
event = Event()
can = Can(board.CAN_0)
uart = Uart(1)
poll.register(queue)
poll.register(event)
poll.register(can)
poll.register(uart)
poll.unregister(queue)
poll.unregister(event)
poll.unregister(can)
poll.unregister(uart)
with assert_raises(OSError):
poll.unregister(queue)
def test_poll():
poll = select.poll()
queue = Queue()
event = Event()
can = Can(board.CAN_0)
uart = Uart(1)
# Register both event channels.
poll.register(queue)
poll.register(event)
poll.register(can)
poll.register(uart)
# Timeout waiting for event.
assert poll.poll(0.01) == []
# Event write, poll and read.
event.write(0x1)
assert poll.poll() == [(event, select.POLLIN)]
assert event.read(0x1) == 0x1
# Queue write, poll and read.
queue.write(b'foo')
assert poll.poll() == [(queue, select.POLLIN)]
assert queue.read(3) == b'foo'
def test_bad_arguments():
poll = select.poll()
with assert_raises(TypeError, "channel object required"):
poll.register(None)
with assert_raises(OSError):
poll.unregister(None)
TESTCASES = [
(test_help, "test_help"),
(test_register_unregister, "test_register_unregister"),
(test_poll, "test_poll"),
(test_bad_arguments, "test_bad_arguments")
]
| [
"[email protected]"
] | |
194f0811f4c2ee6cea15e7c6797edd4496899d26 | d282fe910d95b3f23254e5e0d5309c082de81419 | /Ent/E4/demo_skylineviewer.py | 2bf167edf196ad83e52e2d6df59c9ed989dffc8f | [
"Apache-2.0"
] | permissive | Mi7ai/Algoritmia2 | 54fff6d3925ddc8067303d2e507ccde8ba9a025a | 2f1c7b3990e4971f4f977fd0ea4d308004ab3db5 | refs/heads/master | 2021-06-21T18:45:57.875364 | 2021-03-22T12:54:47 | 2021-03-22T12:54:47 | 207,576,996 | 0 | 0 | null | 2021-03-22T12:54:47 | 2019-09-10T14:08:46 | Assembly | UTF-8 | Python | false | false | 346 | py | from Utils.skylineviewer import SkylineViewer
buildings = [(1, 10, 3), (2, 5, 5), (3, 6, 3), (4, 7, 5), (10, 10, 3), (9, 4, 6), (20, 8, 4), (22, 6, 6), (25, 10, 2)]
skyline = [1, 10, 4, 7, 9, 4, 10, 10, 13, 4, 15, 0, 20, 8, 24, 6, 25, 10, 27, 6, 28]
viewer = SkylineViewer(skyline)
for b in buildings:
viewer.add_building(b)
viewer.run() | [
"[email protected]"
] | |
323b5f80a3a048ee37471233c5e6663d18ed90b6 | c7e765a9bed33d3bfb21774e3995bf4a09e04add | /adminmgr/media/code/A3/task3/BD_198_225_960_HjAczew.py | 153f36b21dacb14ce5be9b6efaf652ff426bd9c2 | [
"Apache-2.0"
] | permissive | IamMayankThakur/test-bigdata | 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237 | 7f507918c7bec31c92eedcd94491a83486623049 | refs/heads/master | 2022-05-03T00:59:44.127494 | 2022-02-10T19:50:16 | 2022-02-10T19:50:16 | 201,585,028 | 10 | 4 | Apache-2.0 | 2022-04-22T23:39:45 | 2019-08-10T05:34:09 | Python | UTF-8 | Python | false | false | 1,380 | py | import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
#from pyspark.sql import Row,SQLContext
import sys
import requests
import re
from operator import add
def process_rdd(time, rdd):
# print("----------=========- %s -=========----------" % str(time))
row_rdd = rdd.map(lambda w:(w[0],w[1]))
maximum = row_rdd.take(6)
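    # The incoming RDD is already sorted by (count desc, hashtag); keep the top
    # six rows and print the non-empty hashtags as one comma-separated line.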
hashh=""
i=0
while i<len(maximum):
if(maximum[i][0]!=''):
if i==(len(maximum)-1):
hashh=hashh+str(maximum[i][0])
else:
hashh=hashh+str(maximum[i][0])+","
i=i+1
print("%s"%(hashh))
if len(sys.argv) != 3:
print("Should enter file, Window Size, Batch Duration", file=sys.stderr)
sys.exit(-1)
wind_size=int(sys.argv[1])
batch_duration=int(sys.argv[2])
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,batch_duration)
ssc.checkpoint("home/hduser/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
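# Each record on the socket is assumed to be a semicolon-separated tweet line
# whose eighth field holds a comma-separated hashtag list; split it into
# hashtags, pair each with a count of 1, and keep only non-None entries.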
tweet=dataStream.map(lambda w:(w.split(';')[7]))
hashtag=tweet.flatMap(lambda w:(w.split(',')))
hasht=hashtag.map(lambda w:(w,1))
counts=hasht.filter(lambda x:x!=None)
totalcount=counts.reduceByKeyAndWindow(lambda a,b: a+b, wind_size, batch_duration).transform(lambda rdd: rdd.sortBy(lambda y: (-y[1],y[0])))
totalcount.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| [
"[email protected]"
] | |
31dd5aa0b605e5b6df526693e182a266f34af494 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02719/s761928282.py | a68bf0b133d3fcf6004cc605a97270348340d011 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | N, K = list(map(lambda n: int(n), input().split(" ")))
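# Repeatedly replace N with the smaller of |N - K| and N mod K; once N <= K/2
# no further operation can decrease it, so that value is printed.
# Illustrative example: N=7, K=4 gives 7 -> 3 -> 1, so 1 is printed.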
while N > int(K/2):
N = min([abs(N-K), N % K])
print(N) | [
"[email protected]"
] | |
97bcf5441c08a2e89ecd3c1db61840c55422e13d | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p283.py | 3a6704dc2d9bb763a3c83885a6be6906ca384262 | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class Solution:
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
def swap(a, i, j):
tmp = a[i]
a[i] = a[j]
a[j] = tmp
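        # Scan left to right: each zero found is swapped with the next non-zero
        # element to its right; the skipped elements are all zeros, so the
        # relative order of the non-zero elements is preserved.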
for i in range(len(nums)):
if nums[i] == 0:
j = i + 1
while j < len(nums) and nums[j] == 0:
j += 1
if j != len(nums):
swap(nums, i, j)
| [
"[email protected]"
] | |
4c81f1a6460a9bb7ccdc7063e49e475861567b6c | 164ffe077dde59373ad9fadcfd727f279a1cfe93 | /jni_build/jni/include/tensorflow/python/ops/numerics.py | bd96d9a72cc653be506d2fa812d7b341a44bdafe | [] | no_license | Basofe/Community_Based_Repository_Traffic_Signs | 524a4cfc77dc6ed3b279556e4201ba63ee8cf6bd | a20da440a21ed5160baae4d283c5880b8ba8e83c | refs/heads/master | 2021-01-22T21:17:37.392145 | 2017-09-28T21:35:58 | 2017-09-28T21:35:58 | 85,407,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all half, float and double tensors to CheckNumericsOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
Returns:
Same tensor as `t`.
"""
with ops.op_scope([t], name, "VerifyFinite") as name:
t = ops.convert_to_tensor(t, name="t")
with ops.colocate_with(t):
verify_input = array_ops.check_numerics(t, message=msg)
out = control_flow_ops.with_dependencies([verify_input], t)
return out
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Returns:
A `group` op depending on all `check_numerics` ops added.
"""
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
| [
"[email protected]"
] | |
37490c2e36c27fde373437a4c3e932557b84fc75 | 4d360320e06339a4f7d2a2723cddf02ff02a306e | /0x06-python-classes/3-square.py | f4c53a022a14d9265165fae9db6fb90f9362d80f | [] | no_license | AmineNeifer/holbertonschool-higher_level_programming | fd6ccdb1b5f0dc85e10750e9f2c7824290697e85 | f5c42bff003b85a7c19702e0233997645fce2fb1 | refs/heads/master | 2020-09-29T02:56:52.286548 | 2020-05-15T00:12:50 | 2020-05-15T00:12:50 | 226,933,206 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | #!/usr/bin/python3
class Square:
def __init__(self, size=0):
"""Args:
size: size of the Square.
"""
if type(size) is not int:
raise TypeError("size must be an integer")
elif size < 0:
raise ValueError("size must be >= 0")
else:
self.__size = size
def area(self):
"""Returns:
the area of the square (size)
"""
return self.__size * self.__size
| [
"[email protected]"
] | |
a95c0d6cbc8db379f298e52cebd758fbec611534 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/uniformtext/_mode.py | a3fdc332612749ea2308c67e4091aac3c79eb00a | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 468 | py | import _plotly_utils.basevalidators
class ModeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="mode", parent_name="layout.uniformtext", **kwargs):
super(ModeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", [False, "hide", "show"]),
**kwargs
)
| [
"[email protected]"
] | |
8c4471b5f0528f4af6e01a68521acbbfc97f6172 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /mojo/public/tools/bindings/generators/mojom_cpp_generator.py | 07eb45bfc3ec2681a844efcd9b5295fe3abc7456 | [
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 25,270 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_cpp_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
_kind_to_cpp_literal_suffix = {
mojom.UINT8: "U",
mojom.UINT16: "U",
mojom.UINT32: "U",
mojom.FLOAT: "f",
mojom.UINT64: "ULL",
}
# TODO(rockot): Get rid of these globals. This requires some refactoring of the
# generator library code so that filters can use the generator as context.
_current_typemap = {}
_for_blink = False
_use_new_wrapper_types = False
# TODO(rockot, yzshen): The variant handling is kind of a hack currently. Make
# it right.
_variant = None
class _NameFormatter(object):
"""A formatter for the names of kinds or values."""
def __init__(self, token, variant):
self._token = token
self._variant = variant
def Format(self, separator, prefixed=False, internal=False,
include_variant=False, add_same_module_namespaces=False,
flatten_nested_kind=False):
"""Formats the name according to the given configuration.
Args:
separator: Separator between different parts of the name.
prefixed: Whether a leading separator should be added.
internal: Returns the name in the "internal" namespace.
include_variant: Whether to include variant as namespace. If |internal| is
True, then this flag is ignored and variant is not included.
add_same_module_namespaces: Includes all namespaces even if the token is
from the same module as the current mojom file.
flatten_nested_kind: It is allowed to define enums inside structs and
interfaces. If this flag is set to True, this method concatenates the
parent kind and the nested kind with '_', instead of treating the
parent kind as a scope."""
parts = []
if self._ShouldIncludeNamespace(add_same_module_namespaces):
if prefixed:
parts.append("")
parts.extend(self._GetNamespace())
if include_variant and self._variant and not internal:
parts.append(self._variant)
parts.extend(self._GetName(internal, flatten_nested_kind))
return separator.join(parts)
def FormatForCpp(self, add_same_module_namespaces=False, internal=False,
flatten_nested_kind=False):
return self.Format(
"::", prefixed=True,
add_same_module_namespaces=add_same_module_namespaces,
internal=internal, include_variant=True,
flatten_nested_kind=flatten_nested_kind)
def FormatForMojom(self):
return self.Format(".", add_same_module_namespaces=True)
def _MapKindName(self, token, internal):
if not internal:
return token.name
if (mojom.IsStructKind(token) or mojom.IsUnionKind(token) or
mojom.IsEnumKind(token)):
return token.name + "_Data"
return token.name
def _GetName(self, internal, flatten_nested_kind):
if isinstance(self._token, mojom.EnumValue):
name_parts = _NameFormatter(self._token.enum, self._variant)._GetName(
internal, flatten_nested_kind)
name_parts.append(self._token.name)
return name_parts
name_parts = []
if internal:
name_parts.append("internal")
if (flatten_nested_kind and mojom.IsEnumKind(self._token) and
self._token.parent_kind):
name = "%s_%s" % (self._token.parent_kind.name,
self._MapKindName(self._token, internal))
name_parts.append(name)
return name_parts
if self._token.parent_kind:
name_parts.append(self._MapKindName(self._token.parent_kind, internal))
name_parts.append(self._MapKindName(self._token, internal))
return name_parts
def _ShouldIncludeNamespace(self, add_same_module_namespaces):
return add_same_module_namespaces or self._token.imported_from
def _GetNamespace(self):
if self._token.imported_from:
return NamespaceToArray(self._token.imported_from["namespace"])
elif hasattr(self._token, "module"):
return NamespaceToArray(self._token.module.namespace)
return []
def ConstantValue(constant):
return ExpressionToText(constant.value, kind=constant.kind)
# TODO(yzshen): Revisit the default value feature. It was designed prior to
# custom type mapping.
def DefaultValue(field):
if field.default:
if mojom.IsStructKind(field.kind):
assert field.default == "default"
if not IsTypemappedKind(field.kind):
return "%s::New()" % GetNameForKind(field.kind)
return ExpressionToText(field.default, kind=field.kind)
if not _use_new_wrapper_types:
if mojom.IsArrayKind(field.kind) or mojom.IsMapKind(field.kind):
return "nullptr";
if mojom.IsStringKind(field.kind):
return "" if _for_blink else "nullptr"
return ""
def NamespaceToArray(namespace):
return namespace.split(".") if namespace else []
def GetNameForKind(kind, internal=False, flatten_nested_kind=False,
add_same_module_namespaces=False):
return _NameFormatter(kind, _variant).FormatForCpp(
internal=internal, flatten_nested_kind=flatten_nested_kind,
add_same_module_namespaces=add_same_module_namespaces)
def GetQualifiedNameForKind(kind, internal=False, flatten_nested_kind=False):
return _NameFormatter(kind, _variant).FormatForCpp(
internal=internal, add_same_module_namespaces=True,
flatten_nested_kind=flatten_nested_kind)
def GetFullMojomNameForKind(kind):
return _NameFormatter(kind, _variant).FormatForMojom()
def IsTypemappedKind(kind):
return hasattr(kind, "name") and \
GetFullMojomNameForKind(kind) in _current_typemap
def IsNativeOnlyKind(kind):
return (mojom.IsStructKind(kind) or mojom.IsEnumKind(kind)) and \
kind.native_only
def IsHashableKind(kind):
"""Check if the kind can be hashed.
Args:
kind: {Kind} The kind to check.
Returns:
{bool} True if a value of this kind can be hashed.
"""
checked = set()
def Check(kind):
if kind.spec in checked:
return True
checked.add(kind.spec)
if mojom.IsNullableKind(kind):
return False
elif mojom.IsStructKind(kind):
if (IsTypemappedKind(kind) and
not _current_typemap[GetFullMojomNameForKind(kind)]["hashable"]):
return False
return all(Check(field.kind) for field in kind.fields)
elif mojom.IsUnionKind(kind):
return all(Check(field.kind) for field in kind.fields)
elif mojom.IsAnyHandleKind(kind):
return False
elif mojom.IsAnyInterfaceKind(kind):
return False
# TODO(tibell): Arrays and maps could be made hashable. We just don't have a
# use case yet.
elif mojom.IsArrayKind(kind):
return False
elif mojom.IsMapKind(kind):
return False
else:
return True
return Check(kind)
def GetNativeTypeName(typemapped_kind):
return _current_typemap[GetFullMojomNameForKind(typemapped_kind)]["typename"]
def GetCppPodType(kind):
if mojom.IsStringKind(kind):
return "char*"
return _kind_to_cpp_type[kind]
def GetCppWrapperType(kind, add_same_module_namespaces=False):
def _AddOptional(type_name):
pattern = "WTF::Optional<%s>" if _for_blink else "base::Optional<%s>"
return pattern % type_name
if IsTypemappedKind(kind):
type_name = GetNativeTypeName(kind)
if (mojom.IsNullableKind(kind) and
not _current_typemap[GetFullMojomNameForKind(kind)][
"nullable_is_same_type"]):
type_name = _AddOptional(type_name)
return type_name
if mojom.IsEnumKind(kind):
return GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sPtr" % GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsArrayKind(kind):
pattern = None
if _use_new_wrapper_types:
pattern = "WTF::Vector<%s>" if _for_blink else "std::vector<%s>"
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
else:
pattern = "mojo::WTFArray<%s>" if _for_blink else "mojo::Array<%s>"
return pattern % GetCppWrapperType(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsMapKind(kind):
pattern = None
if _use_new_wrapper_types:
pattern = ("WTF::HashMap<%s, %s>" if _for_blink else
"std::unordered_map<%s, %s>")
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
else:
pattern = "mojo::WTFMap<%s, %s>" if _for_blink else "mojo::Map<%s, %s>"
return pattern % (
GetCppWrapperType(
kind.key_kind,
add_same_module_namespaces=add_same_module_namespaces),
GetCppWrapperType(
kind.value_kind,
add_same_module_namespaces=add_same_module_namespaces))
if mojom.IsInterfaceKind(kind):
return "%sPtr" % GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequest" % GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsAssociatedInterfaceKind(kind):
return "%sAssociatedPtrInfo" % GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "%sAssociatedRequest" % GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsStringKind(kind):
if _for_blink:
return "WTF::String"
if not _use_new_wrapper_types:
return "mojo::String"
type_name = "std::string"
return _AddOptional(type_name) if mojom.IsNullableKind(kind) else type_name
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
if not kind in _kind_to_cpp_type:
raise Exception("Unrecognized kind %s" % kind.spec)
return _kind_to_cpp_type[kind]
def IsMoveOnlyKind(kind):
if IsTypemappedKind(kind):
if mojom.IsEnumKind(kind):
return False
return _current_typemap[GetFullMojomNameForKind(kind)]["move_only"]
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return True
if mojom.IsArrayKind(kind):
return IsMoveOnlyKind(kind.kind) if _use_new_wrapper_types else True
if mojom.IsMapKind(kind):
return IsMoveOnlyKind(kind.value_kind) if _use_new_wrapper_types else True
if mojom.IsAnyHandleOrInterfaceKind(kind):
return True
return False
def IsCopyablePassByValue(kind):
if not IsTypemappedKind(kind):
return False
return _current_typemap[GetFullMojomNameForKind(kind)][
"copyable_pass_by_value"]
def ShouldPassParamByValue(kind):
return ((not mojom.IsReferenceKind(kind)) or IsMoveOnlyKind(kind) or
IsCopyablePassByValue(kind))
def GetCppWrapperParamType(kind):
cpp_wrapper_type = GetCppWrapperType(kind)
return (cpp_wrapper_type if ShouldPassParamByValue(kind)
else "const %s&" % cpp_wrapper_type)
def GetCppFieldType(kind):
if mojom.IsStructKind(kind):
return ("mojo::internal::Pointer<%s>" %
GetNameForKind(kind, internal=True))
if mojom.IsUnionKind(kind):
return "%s" % GetNameForKind(kind, internal=True)
if mojom.IsArrayKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Array_Data<%s>>" %
GetCppFieldType(kind.kind))
if mojom.IsMapKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Map_Data<%s, %s>>" %
(GetCppFieldType(kind.key_kind), GetCppFieldType(kind.value_kind)))
if mojom.IsInterfaceKind(kind):
return "mojo::internal::Interface_Data"
if mojom.IsInterfaceRequestKind(kind):
return "mojo::internal::Handle_Data"
if mojom.IsAssociatedInterfaceKind(kind):
return "mojo::internal::AssociatedInterface_Data"
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "mojo::internal::AssociatedInterfaceRequest_Data"
if mojom.IsEnumKind(kind):
return "int32_t"
if mojom.IsStringKind(kind):
return "mojo::internal::Pointer<mojo::internal::String_Data>"
if mojom.IsAnyHandleKind(kind):
return "mojo::internal::Handle_Data"
return _kind_to_cpp_type[kind]
def GetCppUnionFieldType(kind):
if mojom.IsUnionKind(kind):
return ("mojo::internal::Pointer<%s>" % GetNameForKind(kind, internal=True))
return GetCppFieldType(kind)
def GetUnionGetterReturnType(kind):
if mojom.IsReferenceKind(kind):
return "%s&" % GetCppWrapperType(kind)
return GetCppWrapperType(kind)
def GetUnionTraitGetterReturnType(kind):
"""Get field type used in UnionTraits template specialization.
The type may be qualified as UnionTraits specializations live outside the
namespace where e.g. structs are defined.
Args:
kind: {Kind} The type of the field.
Returns:
{str} The C++ type to use for the field.
"""
if mojom.IsReferenceKind(kind):
return "%s&" % GetCppWrapperType(kind, add_same_module_namespaces=True)
return GetCppWrapperType(kind, add_same_module_namespaces=True)
def GetCppDataViewType(kind, qualified=False):
def _GetName(input_kind):
return _NameFormatter(input_kind, None).FormatForCpp(
add_same_module_namespaces=qualified, flatten_nested_kind=True)
if mojom.IsEnumKind(kind):
return _GetName(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sDataView" % _GetName(kind)
if mojom.IsArrayKind(kind):
return "mojo::ArrayDataView<%s>" % GetCppDataViewType(kind.kind, qualified)
if mojom.IsMapKind(kind):
return ("mojo::MapDataView<%s, %s>" % (
GetCppDataViewType(kind.key_kind, qualified),
GetCppDataViewType(kind.value_kind, qualified)))
if mojom.IsStringKind(kind):
return "mojo::StringDataView"
if mojom.IsInterfaceKind(kind):
return "%sPtrDataView" % _GetName(kind)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequestDataView" % _GetName(kind.kind)
if mojom.IsAssociatedInterfaceKind(kind):
return "%sAssociatedPtrInfoDataView" % _GetName(kind.kind)
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "%sAssociatedRequestDataView" % _GetName(kind.kind)
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetUnmappedTypeForSerializer(kind):
return GetCppDataViewType(kind, qualified=True)
def TranslateConstants(token, kind):
if isinstance(token, mojom.NamedValue):
return _NameFormatter(token, _variant).FormatForCpp(
flatten_nested_kind=True)
if isinstance(token, mojom.BuiltinValue):
if token.value == "double.INFINITY" or token.value == "float.INFINITY":
return "INFINITY";
if token.value == "double.NEGATIVE_INFINITY" or \
token.value == "float.NEGATIVE_INFINITY":
return "-INFINITY";
if token.value == "double.NAN" or token.value == "float.NAN":
return "NAN";
if (kind is not None and mojom.IsFloatKind(kind)):
return token if token.isdigit() else token + "f";
# Per C++11, 2.14.2, the type of an integer literal is the first of the
# corresponding list in Table 6 in which its value can be represented. In this
# case, the list for decimal constants with no suffix is:
# int, long int, long long int
# The standard considers a program ill-formed if it contains an integer
# literal that cannot be represented by any of the allowed types.
#
# As it turns out, MSVC doesn't bother trying to fall back to long long int,
# so the integral constant -2147483648 causes it grief: it decides to
# represent 2147483648 as an unsigned integer, and then warns that the unary
# minus operator doesn't make sense on unsigned types. Doh!
if kind == mojom.INT32 and token == "-2147483648":
return "(-%d - 1) /* %s */" % (
2**31 - 1, "Workaround for MSVC bug; see https://crbug.com/445618")
return "%s%s" % (token, _kind_to_cpp_literal_suffix.get(kind, ""))
def ExpressionToText(value, kind=None):
return TranslateConstants(value, kind)
def RequiresContextForDataView(kind):
for field in kind.fields:
if mojom.IsReferenceKind(field.kind):
return True
return False
def ShouldInlineStruct(struct):
# TODO(darin): Base this on the size of the wrapper class.
if len(struct.fields) > 4:
return False
for field in struct.fields:
if mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind):
return False
return True
def ContainsMoveOnlyMembers(struct):
for field in struct.fields:
if IsMoveOnlyKind(field.kind):
return True
return False
def ShouldInlineUnion(union):
return not any(
mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind)
for field in union.fields)
def GetContainerValidateParamsCtorArgs(kind):
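  # Builds the constructor-argument string for mojo::internal::ContainerValidateParams:
  # strings and arrays pass (expected_num_elements, element_is_nullable,
  # element_validate_params) or (expected_num_elements, enum_validate_func), while
  # maps pass nested validate params for their keys and values.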
if mojom.IsStringKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = "nullptr"
element_validate_params = "nullptr"
enum_validate_func = "nullptr"
elif mojom.IsMapKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = GetNewContainerValidateParams(mojom.Array(
kind=kind.key_kind))
element_validate_params = GetNewContainerValidateParams(mojom.Array(
kind=kind.value_kind))
enum_validate_func = "nullptr"
else: # mojom.IsArrayKind(kind)
expected_num_elements = generator.ExpectedArraySize(kind) or 0
element_is_nullable = mojom.IsNullableKind(kind.kind)
key_validate_params = "nullptr"
element_validate_params = GetNewContainerValidateParams(kind.kind)
if mojom.IsEnumKind(kind.kind):
enum_validate_func = ("%s::Validate" %
GetQualifiedNameForKind(kind.kind, internal=True,
flatten_nested_kind=True))
else:
enum_validate_func = "nullptr"
if enum_validate_func == "nullptr":
if key_validate_params == "nullptr":
return "%d, %s, %s" % (expected_num_elements,
"true" if element_is_nullable else "false",
element_validate_params)
else:
return "%s, %s" % (key_validate_params, element_validate_params)
else:
return "%d, %s" % (expected_num_elements, enum_validate_func)
def GetNewContainerValidateParams(kind):
if (not mojom.IsArrayKind(kind) and not mojom.IsMapKind(kind) and
not mojom.IsStringKind(kind)):
return "nullptr"
return "new mojo::internal::ContainerValidateParams(%s)" % (
GetContainerValidateParamsCtorArgs(kind))
class Generator(generator.Generator):
cpp_filters = {
"constant_value": ConstantValue,
"contains_handles_or_interfaces": mojom.ContainsHandlesOrInterfaces,
"contains_move_only_members": ContainsMoveOnlyMembers,
"cpp_wrapper_param_type": GetCppWrapperParamType,
"cpp_data_view_type": GetCppDataViewType,
"cpp_field_type": GetCppFieldType,
"cpp_union_field_type": GetCppUnionFieldType,
"cpp_pod_type": GetCppPodType,
"cpp_union_getter_return_type": GetUnionGetterReturnType,
"cpp_union_trait_getter_return_type": GetUnionTraitGetterReturnType,
"cpp_wrapper_type": GetCppWrapperType,
"default_value": DefaultValue,
"expression_to_text": ExpressionToText,
"get_container_validate_params_ctor_args":
GetContainerValidateParamsCtorArgs,
"get_name_for_kind": GetNameForKind,
"get_pad": pack.GetPad,
"get_qualified_name_for_kind": GetQualifiedNameForKind,
"has_callbacks": mojom.HasCallbacks,
"has_sync_methods": mojom.HasSyncMethods,
"requires_context_for_data_view": RequiresContextForDataView,
"should_inline": ShouldInlineStruct,
"should_inline_union": ShouldInlineUnion,
"is_array_kind": mojom.IsArrayKind,
"is_enum_kind": mojom.IsEnumKind,
"is_integral_kind": mojom.IsIntegralKind,
"is_native_only_kind": IsNativeOnlyKind,
"is_any_handle_kind": mojom.IsAnyHandleKind,
"is_any_interface_kind": mojom.IsAnyInterfaceKind,
"is_any_handle_or_interface_kind": mojom.IsAnyHandleOrInterfaceKind,
"is_associated_kind": mojom.IsAssociatedKind,
"is_hashable": IsHashableKind,
"is_map_kind": mojom.IsMapKind,
"is_nullable_kind": mojom.IsNullableKind,
"is_object_kind": mojom.IsObjectKind,
"is_reference_kind": mojom.IsReferenceKind,
"is_string_kind": mojom.IsStringKind,
"is_struct_kind": mojom.IsStructKind,
"is_typemapped_kind": IsTypemappedKind,
"is_union_kind": mojom.IsUnionKind,
"passes_associated_kinds": mojom.PassesAssociatedKinds,
"struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
"stylize_method": generator.StudlyCapsToCamel,
"under_to_camel": generator.UnderToCamel,
"unmapped_type_for_serializer": GetUnmappedTypeForSerializer,
}
def GetExtraTraitsHeaders(self):
extra_headers = set()
for entry in self.typemap.itervalues():
extra_headers.update(entry.get("traits_headers", []))
return list(extra_headers)
def GetExtraPublicHeaders(self):
extra_headers = set()
for entry in self.typemap.itervalues():
extra_headers.update(entry.get("public_headers", []))
return list(extra_headers)
def GetJinjaExports(self):
structs = self.GetStructs()
interfaces = self.GetInterfaces()
all_enums = list(self.module.enums)
for struct in structs:
all_enums.extend(struct.enums)
for interface in interfaces:
all_enums.extend(interface.enums)
return {
"module": self.module,
"namespace": self.module.namespace,
"namespaces_as_array": NamespaceToArray(self.module.namespace),
"imports": self.module.imports,
"kinds": self.module.kinds,
"enums": self.module.enums,
"all_enums": all_enums,
"structs": structs,
"unions": self.GetUnions(),
"interfaces": interfaces,
"variant": self.variant,
"extra_traits_headers": self.GetExtraTraitsHeaders(),
"extra_public_headers": self.GetExtraPublicHeaders(),
"for_blink": self.for_blink,
"use_new_wrapper_types": self.use_new_wrapper_types,
"export_attribute": self.export_attribute,
"export_header": self.export_header,
}
@staticmethod
def GetTemplatePrefix():
return "cpp_templates"
@classmethod
def GetFilters(cls):
return cls.cpp_filters
@UseJinja("module.h.tmpl")
def GenerateModuleHeader(self):
return self.GetJinjaExports()
@UseJinja("module.cc.tmpl")
def GenerateModuleSource(self):
return self.GetJinjaExports()
@UseJinja("module-shared.h.tmpl")
def GenerateModuleSharedHeader(self):
return self.GetJinjaExports()
@UseJinja("module-shared-internal.h.tmpl")
def GenerateModuleSharedInternalHeader(self):
return self.GetJinjaExports()
@UseJinja("module-shared.cc.tmpl")
def GenerateModuleSharedSource(self):
return self.GetJinjaExports()
def GenerateFiles(self, args):
if self.generate_non_variant_code:
self.Write(self.GenerateModuleSharedHeader(),
self.MatchMojomFilePath("%s-shared.h" % self.module.name))
self.Write(
self.GenerateModuleSharedInternalHeader(),
self.MatchMojomFilePath("%s-shared-internal.h" % self.module.name))
self.Write(self.GenerateModuleSharedSource(),
self.MatchMojomFilePath("%s-shared.cc" % self.module.name))
else:
global _current_typemap
_current_typemap = self.typemap
global _for_blink
_for_blink = self.for_blink
global _use_new_wrapper_types
_use_new_wrapper_types = self.use_new_wrapper_types
global _variant
_variant = self.variant
suffix = "-%s" % self.variant if self.variant else ""
self.Write(self.GenerateModuleHeader(),
self.MatchMojomFilePath("%s%s.h" % (self.module.name, suffix)))
self.Write(
self.GenerateModuleSource(),
self.MatchMojomFilePath("%s%s.cc" % (self.module.name, suffix)))
| [
"[email protected]"
] | |
33f989d69d8aef0c49fbf0a3dbee6ff0647c9d01 | 253f3a81b582ee53b86451dc5a06d6dc8923b0dd | /src/commands/commandslist.py | 946bf2c4b052a3394de39102411e47b230dc7f67 | [] | no_license | bdubyapee/akriosmud | c02ff2c9e3916efedc4837b19e02caf6255045f9 | d6c234e22fc56422315553217639bcb3e4c49984 | refs/heads/master | 2020-04-16T04:53:02.163852 | 2020-02-17T01:09:10 | 2020-02-17T01:09:10 | 165,284,647 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | # Project: Akrios
# Filename: commands/commandlist.py
#
# Capability: player
#
# Command Description: Listing of currently available commands filtered by capabilities.
#
# By: Jubelo
from commands import *
name = "commandslist"
version = 1
requirements = {'capability': ['player'],
'generic_fail': "See {WHelp commandlist{x for help with this command.",
'truth_checks': [],
'false_checks': []}
@Command(**requirements)
def commandslist(caller, args, **kwargs):
header = f"{{rCommands Available{{x"
caller.write(f"{header:^80}")
caller.write("")
sub_header = f"{{BPlease see {{Whelp <command>{{B for additional information{{x"
caller.write(f"{sub_header:^80}")
caller.write("")
cmd_list = [cmd for cmd in Command.commandhash
if set(Command.commandcapability[cmd]) & set(caller.capability)]
cmd_list.sort()
numcols = 4
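    # Pad the command list so it divides evenly into rows of numcols entries,
    # then print it as a simple four-column table.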
while (len(cmd_list) % numcols) > 0:
cmd_list.append(' ')
for i in range(0, len(cmd_list), numcols):
output = ''
for l in range(0, numcols):
output = f"{output}{cmd_list[i+l]:20}"
caller.write(output)
caller.write("")
caller.write("\n\r{WUsage{x: <command> <optional arguments>")
| [
"[email protected]"
] | |
ebe543088903155d46d06e03f07284f75a632e35 | 09996c147d498e61352683c5e7df0f3cd517ea27 | /test/oldcrab/whelicity_DataDoubleEl_SE_GH_cfg.py | 485f08167cd51bc90018ac079a2e540233e35f86 | [] | no_license | shchenarani/whelicityAnalyzer | 3e3320a6d03eab21de6d51dad60f057b6a2f3d47 | 8b4586f7210c6a166b949470c22310b25683da4f | refs/heads/master | 2021-09-10T12:22:52.088849 | 2018-03-26T07:52:54 | 2018-03-26T07:52:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
process.source = cms.Source ("PoolSource",fileNames = readFiles)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
## configure process options
process.options = cms.untracked.PSet(
allowUnscheduled = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(True)
)
## configure geometry & conditions
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.load("Configuration.StandardSequences.MagneticField_cff")
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v6'
readFiles.extend( [
'root://xrootd-cms.infn.it//store/data/Run2016E/DoubleEG/MINIAOD/03Feb2017-v1/110000/EA7C2D56-A1EA-E611-86B2-0CC47A13CC7A.root']);
#
# Set up electron ID (VID framework)
#
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
# turn on VID producer, indicate data format to be
# DataFormat.AOD or DataFormat.MiniAOD, as appropriate
useAOD = False
if useAOD == True :
dataFormat = DataFormat.AOD
else :
dataFormat = DataFormat.MiniAOD
switchOnVIDElectronIdProducer(process, dataFormat)
# define which IDs we want to produce
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Summer16_80X_V1_cff']
#add them to the VID producer
for idmod in my_id_modules:
setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
process.load("whelicity1.MiniAnalyzer.whelicity_cff")
process.Whelicity.isData = cms.bool(True)
process.Whelicity.isPythia = cms.bool(False)
process.Whelicity.isSingleElectron = cms.bool(True)
process.Whelicity.DiEl = cms.bool(True)
process.Whelicity.muonISOSF = cms.string("ISOEfficienciesAndSF_GH.root")
process.Whelicity.muonIDSF = cms.string("IDEfficienciesAndSF_GH.root")
process.Whelicity.outFileName = cms.string("tree.root")
process.TFileService = cms.Service("TFileService",
fileName = cms.string("histos.root")
)
# Make sure to add the ID sequence upstream from the user analysis module
process.p = cms.Path(process.egmGsfElectronIDSequence * process.Whelicity)
| [
"[email protected]"
] | |
29a1f916d57515291923131169a4a24024185702 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /aav/util/model_utils.py | 70f397164e6eed14c9701edf79d67e33b353a1f5 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 3,577 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities for extracting information from training checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas
import tensorflow as tf
# EventAccumulator is assumed to come from the tensorboard package here; it is
# not exposed as tf.event_accumulator in released TensorFlow versions.
from tensorboard.backend.event_processing import event_accumulator
def get_best_checkpoint_path(
model_dir, metric='loss', eval_subdir='eval_one_pass'):
"""Gets the path of the best checkpoint by given metric.
Args:
model_dir: (str) Path to tf.Estimator model.
metric: (str) Model evaluation metric over which to optimize.
eval_subdir: (str) Subdir path within model_dir to search for evaluation
events.
Returns:
(str) The path to the model best checkpoint.
Raises:
ValueError: If the given metric is not supported.
"""
  events = event_accumulator.EventAccumulator(
      os.path.join(model_dir, eval_subdir))
events.Reload() # Actually read the event files into memory.
step = None
if metric == 'precision':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'loss':
step = _get_best_checkpoint_step(events, metric, higher_is_better=False)
elif metric == 'accuracy':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'recall':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
else:
raise ValueError('Unknown metric "%s" is not supported' % metric)
return os.path.join(model_dir, 'model.ckpt-%d' % step)
def _get_best_checkpoint_step(
events, metric_key='precision', higher_is_better=True):
"""Gets the global step number of the best checkpoint by given metric.
Args:
events: (tf.Events) The summary events for a model evaluation.
metric_key: (str) The model evaluation metric key to optimize over.
higher_is_better: (bool) Is a higher value of the metric better?
Returns:
(int) The global step number of the best checkpoint.
"""
summary_df = pandas.DataFrame([
{'step': entry.step, metric_key: entry.value}
for entry in events.Scalars(metric_key)
])
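  # One row per evaluated checkpoint: the global step and the tracked metric value.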
metric = summary_df[metric_key]
best_index = None
if higher_is_better:
best_index = metric.idxmax()
else:
best_index = metric.idxmin()
best_checkpoint = summary_df.iloc[best_index]
return best_checkpoint.step
| [
"[email protected]"
] | |
e7d70888cafcdf8d8f016d284cb11462549acf2c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_189/ch30_2019_08_26_19_06_32_624802.py | 0bf8b64bd817ff2689f3209d5285dc5d58edda50 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | def distancia(ang,vel):
    import math
    dist = ((vel ** 2) * math.sin(2 * ang)) / 9.8
    if 98 <= dist <= 102:
        print("Acertou!")
    elif dist > 102:
        print("Muito longe")
    else:
        print("Muito perto")
    return dist
| [
"[email protected]"
] | |
0294350b106cf605e7bc42c9069605f5e39f7c89 | 22bf2740e893b5020088b0d47f7b57eb2f9a2b5f | /version3/source/insert.py | 435e9f58028892f9d0b4a196e61675f5c9c0353e | [] | no_license | letianccc/latin_database | f2c1f9c58f398d322f722a3b1dade2296a2da19a | 1aa6c7eed57f6ea72d6e82e0a19b7a9614fb34c8 | refs/heads/master | 2021-04-15T09:26:48.132616 | 2018-04-12T23:40:17 | 2018-04-12T23:40:17 | 126,196,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from version3.source.catalog import Catalog
class Insert:
def __init__(self, table_name, tuple_, tran_id=None):
self.table_name = table_name
self.tuple = tuple_
self.tran_id = tran_id
def execute(self):
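        # Look up the table's heap file in the catalog and insert the tuple under
        # this transaction; 'X' presumably requests an exclusive lock.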
hf = Catalog.name_to_file(self.table_name)
hf.insert_tuple(self.tuple, self.tran_id, 'X')
| [
"[email protected]"
] | |
07e22436ef683aac6402b919ca8971015eb64d89 | 938a496fe78d5538af94017c78a11615a8498682 | /algorithms/401-500/434.number-of-segments-in-a-string.py | 30057d9c241f8500cb524ddb181e94c26947fdca | [] | no_license | huilizhou/Leetcode-pyhton | 261280044d15d0baeb227248ade675177efdb297 | 6ae85bf79c5a21735e3c245c0c256f29c1c60926 | refs/heads/master | 2020-03-28T15:57:52.762162 | 2019-11-26T06:14:13 | 2019-11-26T06:14:13 | 148,644,059 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # 字符串中的单词数
class Solution(object):
def countSegments(self, s):
"""
:type s: str
:rtype: int
"""
        # Someone else's solution
s = s.strip()
if len(s) == 0:
return 0
else:
sum = 0
s = s.split(' ')
for v in s:
if v != '':
sum += 1
return sum
# return len(s.split())
print(Solution().countSegments("Hello, my name is John"))
| [
"[email protected]"
] | |
8a76e10c251ffe4ca5c2f5adaf0fea29dc57a5b3 | 29ec9a3ba90f12da111d3e25cf75bc7c3db5d8ac | /tests/test_core.py | 27223608f8d79526ceb4cdbed94134a7a6f2049f | [] | no_license | frnsys/drip | cf16d1d917dc7433bb2b279e6dcea18d0394a2ae | 9b9733900c6ca799650e665f228c525dfa143476 | refs/heads/master | 2021-01-10T11:06:57.133358 | 2016-02-28T04:00:31 | 2016-02-28T04:00:31 | 52,692,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,581 | py | import json
from datetime import datetime
from drip.datastore import db
from drip.cluster import cluster
from drip.nlp import title, multisummarize
from drip.models import Event, Story, Article, Feed, Source, Keyword
from tests import TestCase
class CoreTest(TestCase):
def setUp(self):
self.events = json.load(open('tests/data/events.json', 'r'))
self.source = Source('test source')
self.feed = Feed('http://nytimes.com', self.source)
db.session.add(self.source)
db.session.add(self.feed)
db.session.commit()
def article_factory(self, **kwargs):
defaults = {
'url': 'http://nytimes.com/sup',
'text': 'sup',
'html': '<h1>sup</h1>',
'title': 'Sup',
            'image': 'http://nytimes.com/sup.jpg',
'published': datetime(day=1, month=1, year=2015),
'authors': ['Yo Go'],
'keywords': ['sup', 'yo'],
'feed': self.feed
}
defaults.update(kwargs)
return Article(**defaults)
def test_title(self):
expected = [
'Jeremy Thorpe, former Liberal party leader, dies aged 85',
'Woman Arrested in U.S. Teacher\'s Stabbing Death in Abu Dhabi',
'Faces keyboardist Ian McLagan dies',
'China to stop using executed prisoners as source of organs for transplant',
'James Bond movie to be called Spectre'
]
for e, expected in zip(self.events, expected):
articles = [self.article_factory(title=a['title'], text=a['text']) for a in e]
t = title(articles)
self.assertEqual(t, expected)
def test_cluster(self):
articles = []
true_events = []
for e in self.events:
arts = [self.article_factory(title=a['title'], text=a['text']) for a in e]
true_events.append(arts)
articles += arts
clusters = cluster(articles, [])
# Clusters might not be in the same order as the true events
for clus in clusters:
for evs in true_events:
if set(clus.articles) == set(evs):
break
else:
self.fail('Cluster:\n\t{}\ndid not match any expected cluster'.format(
[a.title for a in clus.articles]
))
def test_summarize(self):
articles = []
for e in self.events:
articles = [self.article_factory(title=a['title'], text=a['text']) for a in e]
summary = multisummarize(articles)
# This is more of a placeholder test atm
self.assertTrue(isinstance(summary, list))
def test_keywords(self):
data = [
('This is a title: Spectre', 'The story is about Spectre'),
('A really cool title', 'Spectre is the new film'),
('Yet another title', 'The new title is Spectre')
]
events = []
articles = []
for _ in range(2):
arts = [self.article_factory(title=title, text=text, keywords=['spectre']) for title, text in data]
event = Event(arts[0])
for a in arts[1:]:
event.add(a)
event.update()
articles += arts
events.append(event)
db.session.add(event)
story = Story(events[0])
story.add(events[1])
story.update()
db.session.add(story)
db.session.commit()
keyword = Keyword.query.filter_by(name='spectre').first()
self.assertEqual(set(keyword.subjects.all()), set(articles + events + [story]))
def test_story_candidates(self):
data = [
('This is a title: Spectre', 'The story is about Spectre'),
('A really cool title', 'Spectre is the new film'),
('Yet another title', 'The new title is Spectre')
]
events = []
articles = []
for _ in range(3):
arts = [self.article_factory(title=title, text=text, keywords=['spectre']) for title, text in data]
event = Event(arts[0])
for a in arts[1:]:
event.add(a)
event.update()
articles += arts
events.append(event)
db.session.add(event)
story = Story(events[0])
story.add(events[1])
story.update()
db.session.add(story)
db.session.commit()
event = events[-1]
candidates = Story.candidates(event)
self.assertEqual(candidates[0][0], story)
| [
"[email protected]"
] | |
cb9110d27d0004f8563f7d1a8891ee2eb95d49ef | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/common/test_run/atan2_run.py | 93b020c539675af05a758ffe81d5f4fcf035e136 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 1,719 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""run function for arctangent2"""
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import atan2
from gen_random import random_gaussian
from base import get_rtol_atol
def atan2_run(shape1, dtype1, shape2, dtype2, attrs):
"""run function for arctangent2"""
mod = utils.op_build_test(atan2.atan2, [shape1, shape2], [dtype1, dtype2],
kernel_name="atan2", attrs=attrs)
expect, inputs, out_buf = gen_data(shape1, dtype1, shape2, dtype2)
output = utils.mod_launch(mod, (*inputs, out_buf), expect=expect)
rtol, atol = get_rtol_atol("atan2", dtype1)
cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
return inputs, output, expect, cmp_res
def gen_data(shape1, dtype1, shape2, dtype2):
"""generate valid data for arctangent2"""
input1 = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
input2 = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
expect = np.arctan2(input1, input2)
out_buf = np.full(shape1, np.nan, dtype1)
return expect, (input1, input2), out_buf
| [
"[email protected]"
] | |
b45b2b0b78cb932e1697bb6f7b744db3618136d2 | 855e455b7113d32ad7bebca8e64cece441308b70 | /adafruit_matrixportal/wifi.py | e9411a9fe01ef6b5288209b6bb65a68689dbccaa | [
"MIT"
] | permissive | dsstewa/Adafruit_CircuitPython_MatrixPortal | f7687077d9d8ac83980f5ec75a52e1ca3942a1e3 | 885fb7edfbda0b763dbddbf9865d3fa62528e4c9 | refs/heads/master | 2023-02-09T23:03:50.751414 | 2021-01-05T18:22:43 | 2021-01-05T18:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,845 | py | # SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_matrixportal.wifi`
================================================================================
Helper library for the MatrixPortal M4 or Adafruit RGB Matrix Shield + Metro M4 Airlift Lite.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit MatrixPortal M4 <https://www.adafruit.com/product/4745>`_
* `Adafruit Metro M4 Express AirLift <https://www.adafruit.com/product/4000>`_
* `Adafruit RGB Matrix Shield <https://www.adafruit.com/product/2601>`_
* `64x32 RGB LED Matrix <https://www.adafruit.com/product/2278>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import adafruit_requests as requests
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MatrixPortal.git"
class WiFi:
"""Class representing the ESP.
    :param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the
        on-board NeoPixel. Defaults to ``None`` (no status LED).
    :param esp: A passed ESP32 object. Can be used in cases where the ESP32 chip needs to be
        used before calling this class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
"""
def __init__(self, *, status_neopixel=None, esp=None, external_spi=None):
if status_neopixel:
self.neopix = neopixel.NeoPixel(status_neopixel, 1, brightness=0.2)
else:
self.neopix = None
self.neo_status(0)
self.requests = None
if esp: # If there was a passed ESP Object
self.esp = esp
if external_spi: # If SPI Object Passed
spi = external_spi
else: # Else: Make ESP32 connection
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
else:
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_gpio0 = DigitalInOut(board.ESP_GPIO0)
esp32_reset = DigitalInOut(board.ESP_RESET)
esp32_cs = DigitalInOut(board.ESP_CS)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
self.esp = adafruit_esp32spi.ESP_SPIcontrol(
spi, esp32_cs, esp32_ready, esp32_reset, esp32_gpio0
)
requests.set_socket(socket, self.esp)
self._manager = None
gc.collect()
def connect(self, ssid, password):
"""
Connect to WiFi using the settings found in secrets.py
"""
self.esp.connect({"ssid": ssid, "password": password})
self.requests = requests
def neo_status(self, value):
"""The status NeoPixel.
:param value: The color to change the NeoPixel.
"""
if self.neopix:
self.neopix.fill(value)
def manager(self, secrets):
"""Initialize the WiFi Manager if it hasn't been cached and return it"""
if self._manager is None:
self._manager = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(
self.esp, secrets, None
)
return self._manager
@property
def is_connected(self):
"""Return whether we are connected."""
return self.esp.is_connected
@property
def enabled(self):
"""Not currently disablable on the ESP32 Coprocessor"""
return True
| [
"[email protected]"
] | |
72461308ceaf06759f6556c4bf62da939683c9d0 | 1575d5acc07eb67cb4e3cd523a24bb1d39efcb84 | /nn-pima/cv2MLP.py | 3518ebea717713ebf5dd3b51f3e480d47cf3ed97 | [] | no_license | ChenLiangbo/DeepLearning | 4bd80ddb2a41b883ef70947a8b1fdb3b19656df0 | 3464c27116dc00bd597d2b9c25313964e1d89797 | refs/heads/master | 2020-12-24T12:39:27.666215 | 2017-05-09T13:49:44 | 2017-05-09T13:49:44 | 72,974,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #!usr/bin/env/python
# -*- coding: utf-8 -*-
import numpy as np
from bayesClassifier import BayesClassifier
import cv2
dataset = np.load('pima-indians.npy')
columns = np.hsplit(dataset,9)
xsample = np.hstack(columns[0:8])
ysample = columns[8]
shape = xsample.shape
xsample = np.float32(xsample)
ysample = np.float32(ysample)
print "xsample = ",xsample.shape
print "ysample = ",ysample.shape
# indexList = np.random.permutation(shape[0])
indexList = range(shape[0])
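# Deterministic split: the first 538 samples train, the rest test (the random
# permutation above is left commented out).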
x_train = xsample[indexList[0:538]]
y_train = ysample[indexList[0:538]]
print "x_train.shape = ",x_train.shape
print "y_train.shape = ",y_train.shape
x_test = xsample[indexList[538:]]
y_test = ysample[indexList[538:]]
print "x_test.shape = ",x_test.shape
print "y_test.shape = ",y_test.shape
myBayes = BayesClassifier()
layers = np.array([8,15,1])
model = cv2.ANN_MLP()
model.create(layers)
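# Backpropagation training parameters: weight update scale 0.001, no momentum,
# and termination after at most 3000 iterations (epsilon 0.01).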
params = dict( term_crit = (cv2.TERM_CRITERIA_COUNT, 3000, 0.01),
train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
bp_dw_scale = 0.001,
bp_moment_scale = 0.0 )
model.train(x_train,y_train,None,params = params)
ret,resp = model.predict(x_test)
y_predict = (resp > 0.5).astype(np.float32)  # threshold the single output; argmax over one column is always 0
print "y_predict = ",(y_predict.shape,np.mean(y_predict == y_test))
print y_predict[0:10]
result = myBayes.f_measure(y_predict,y_test)
print "result = ",result
| [
"[email protected]"
] | |
f31463bbe7a8decc63c1e4e4655c094c558d31fe | c148fbeff2f5188baed64dfdd3d781b8b15bc7d5 | /tools/build_variables.py | db521a047960e0ee62638abc416ebbb65084f2cb | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | abhattce/pytorch | 46b37747456f9997634f3a11c94e5af61c60442e | 01d7d3de46a3766f60465d266ca204fa5c0df5c6 | refs/heads/master | 2020-05-15T01:45:12.371014 | 2019-04-18T06:52:44 | 2019-04-18T06:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,288 | py | # In the open-source build, these are generated into
# torch/csrc/{autgrad,jit}/generated. In fbcode, this distinction is
# not currently relevant so they are combined into one list.
from __future__ import absolute_import, division, print_function, unicode_literals
load("@bazel_skylib//lib:new_sets.bzl", "sets")
GENERATED_CPP = [
"Functions.cpp",
"THCUNN.cpp",
"THNN.cpp",
"VariableType_0.cpp",
"VariableType_1.cpp",
"VariableType_2.cpp",
"VariableType_3.cpp",
"VariableType_4.cpp",
"register_aten_ops_0.cpp",
"register_aten_ops_1.cpp",
"register_aten_ops_2.cpp",
"python_functions.cpp",
"python_nn_functions.cpp",
"python_torch_functions.cpp",
"python_variable_methods.cpp",
]
# copied from https://github.com/pytorch/pytorch/blob/master/tools/cpp_build/torch/CMakeLists.txt
libtorch_sources = [
":generate-code=Functions.cpp",
":generate-code=register_aten_ops_0.cpp",
":generate-code=register_aten_ops_1.cpp",
":generate-code=register_aten_ops_2.cpp",
":generate-code=VariableType_0.cpp",
":generate-code=VariableType_1.cpp",
":generate-code=VariableType_2.cpp",
":generate-code=VariableType_3.cpp",
":generate-code=VariableType_4.cpp",
"torch/csrc/autograd/VariableTypeManual.cpp",
"torch/csrc/autograd/anomaly_mode.cpp",
"torch/csrc/autograd/engine.cpp",
"torch/csrc/autograd/function.cpp",
"torch/csrc/autograd/functions/accumulate_grad.cpp",
"torch/csrc/autograd/functions/basic_ops.cpp",
"torch/csrc/autograd/functions/tensor.cpp",
"torch/csrc/autograd/functions/utils.cpp",
"torch/csrc/autograd/grad_mode.cpp",
"torch/csrc/autograd/input_buffer.cpp",
"torch/csrc/autograd/profiler.cpp",
"torch/csrc/autograd/record_function.cpp",
"torch/csrc/autograd/saved_variable.cpp",
"torch/csrc/autograd/variable.cpp",
"torch/csrc/Exceptions.cpp",
"torch/csrc/jit/autodiff.cpp",
"torch/csrc/jit/attributes.cpp",
"torch/csrc/jit/argument_spec.cpp",
"torch/csrc/jit/constants.cpp",
"torch/csrc/jit/node_hashing.cpp",
"torch/csrc/jit/export.cpp",
"torch/csrc/jit/pass_manager.cpp",
"torch/csrc/jit/pickler.cpp",
"torch/csrc/jit/graph_executor.cpp",
"torch/csrc/jit/import.cpp",
"torch/csrc/jit/interpreter.cpp",
"torch/csrc/jit/ir.cpp",
"torch/csrc/jit/irparser.cpp",
"torch/csrc/jit/netdef_converter.cpp",
"torch/csrc/jit/register_c10_ops.cpp",
"torch/csrc/jit/symbolic_script.cpp",
"torch/csrc/jit/profiling_record.cpp",
"torch/csrc/jit/operator.cpp",
"torch/csrc/jit/passes/alias_analysis.cpp",
"torch/csrc/jit/passes/batch_mm.cpp",
"torch/csrc/jit/passes/canonicalize_ops.cpp",
"torch/csrc/jit/passes/canonicalize.cpp",
"torch/csrc/jit/passes/common_subexpression_elimination.cpp",
"torch/csrc/jit/passes/constant_propagation.cpp",
"torch/csrc/jit/passes/constant_pooling.cpp",
"torch/csrc/jit/passes/create_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/dead_code_elimination.cpp",
"torch/csrc/jit/passes/erase_number_types.cpp",
"torch/csrc/jit/passes/graph_fuser.cpp",
"torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/inplace_check.cpp",
"torch/csrc/jit/passes/loop_unrolling.cpp",
"torch/csrc/jit/passes/lower_grad_of.cpp",
"torch/csrc/jit/passes/lower_tuples.cpp",
"torch/csrc/jit/passes/peephole.cpp",
"torch/csrc/jit/passes/python_print.cpp",
"torch/csrc/jit/passes/quantization.cpp",
"torch/csrc/jit/passes/remove_expands.cpp",
"torch/csrc/jit/passes/requires_grad_analysis.cpp",
"torch/csrc/jit/passes/shape_analysis.cpp",
"torch/csrc/jit/passes/specialize_autogradzero.cpp",
"torch/csrc/jit/passes/utils/subgraph_utils.cpp",
"torch/csrc/jit/passes/utils/memory_dag.cpp",
"torch/csrc/jit/register_prim_ops.cpp",
"torch/csrc/jit/register_special_ops.cpp",
"torch/csrc/jit/register_quantized_ops.cpp",
"torch/csrc/jit/scope.cpp",
"torch/csrc/jit/script/compiler.cpp",
"torch/csrc/api/src/jit.cpp",
"torch/csrc/jit/script/edit_distance.cpp",
"torch/csrc/jit/script/logging.cpp",
"torch/csrc/jit/script/final_returns.cpp",
"torch/csrc/jit/script/function_schema_parser.cpp",
"torch/csrc/jit/script/schema_type_parser.cpp",
"torch/csrc/jit/script/script_type_parser.cpp",
"torch/csrc/jit/script/sugared_value.cpp",
"torch/csrc/jit/script/schema_matching.cpp",
"torch/csrc/jit/script/class_type.cpp",
"torch/csrc/jit/script/parser.cpp",
"torch/csrc/jit/testing/file_check.cpp",
"torch/csrc/jit/import_source.cpp",
"torch/csrc/jit/hooks_for_testing.cpp",
"torch/csrc/jit/script/builtin_functions.cpp",
"torch/csrc/jit/script/lexer.cpp",
"torch/csrc/jit/script/strtod.cpp",
"torch/csrc/jit/script/module.cpp",
"torch/csrc/jit/tracer.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
"torch/csrc/jit/fuser/kernel_cache.cpp",
"torch/csrc/jit/fuser/compiler.cpp",
"torch/csrc/jit/fuser/executor.cpp",
"torch/csrc/jit/fuser/codegen.cpp",
"torch/csrc/jit/fuser/fallback.cpp",
"torch/csrc/jit/fuser/cpu/fused_kernel.cpp",
"torch/csrc/jit/fuser/cpu/dynamic_library_unix.cpp",
"torch/csrc/jit/fuser/interface.cpp",
"test/cpp/jit/test.cpp",
]
libtorch_cuda_sources = [
"torch/csrc/cuda/comm.cpp",
"torch/csrc/cuda/nccl.cpp",
"torch/csrc/jit/fuser/cuda/fused_kernel.cpp",
"torch/csrc/jit/fuser/cuda/thnvrtc.cpp",
"torch/csrc/autograd/profiler_cuda.cpp",
"torch/csrc/autograd/functions/comm.cpp"
]
def add_torch_libs():
r = {}
    # We start torch_python_sources with all cpp files, and exclude some,
    # including the files already contained in the torch and cuda bindings.
globbed_sources = (native.glob(
["torch/csrc/**/*.cpp"],
exclude=[
# remove anything that has "generic" in it"s path
"torch/csrc/**/generic/**/*.cpp",
# distributed only uses Module.cpp
# so remove all other files and just include that
"torch/csrc/distributed/**/*.cpp",
# top-level hook of extension registration lives in a separate file
"torch/csrc/stub.cpp",
# to avoid redefinitions of symbols defined in
# dynamic_library_unix.cpp
"torch/csrc/jit/fuser/cpu/dynamic_library_win.cpp",
],
) + [
"torch/csrc/distributed/Module.cpp",
"torch/csrc/distributed/c10d/init.cpp",
"torch/csrc/distributed/c10d/ddp.cpp",
"torch/csrc/distributed/c10d/reducer.cpp",
] + [":generate-code=" + x for x in GENERATED_CPP])
libtorch_python_sources = sets.to_list(sets.difference(
sets.make(globbed_sources),
sets.make(libtorch_sources + libtorch_cuda_sources),
))
common_flags = {
"compiler_flags": [
"-D_THP_CORE",
"-DUSE_C10D",
"-DUSE_CUDNN",
"-DUSE_DISTRIBUTED",
"-DUSE_NCCL",
"-DUSE_NUMPY",
"-DUSE_SCALARS",
"-DNO_CUDNN_DESTROY_HANDLE",
"-DPYTORCH_ONNX_CAFFE2_BUNDLE",
"-Wno-write-strings",
"-Wno-format",
"-Wno-strict-aliasing",
"-Wno-non-virtual-dtor",
"-Wno-shadow-compatible-local",
"-Wno-empty-body",
],
"compiler_specific_flags": {
"clang": [
"-Wno-absolute-value",
"-Wno-expansion-to-defined",
"-Wno-pessimizing-move",
"-Wno-return-type-c-linkage",
"-Wno-unknown-pragmas",
]
},
"headers": native.glob(["torch/csrc/**/*.h", "torch/csrc/generic/*.cpp", "test/cpp/jit/*.h"]),
"preprocessor_flags": [
"-Icaffe2",
"-Icaffe2/torch/csrc/api/include",
"-Icaffe2/torch/csrc",
"-Icaffe2/torch/csrc/nn",
"-Icaffe2/torch/lib",
],
}
cpp_library(
name="libtorch",
srcs=libtorch_sources,
link_whole=True,
deps=[
":generated-autograd-headers",
":generated-autograd-headers-bare",
":generated-jit-headers",
"//caffe2/aten:ATen-cpu",
"//caffe2/caffe2:caffe2_cpu",
"//caffe2/torch/lib/libshm:libshm",
"//caffe2/caffe2/quantization/server:dnnlowp_ops",
],
external_deps=[
("nanopb", None, "protobuf-nanopb"),
("protobuf", None),
],
**common_flags
)
cpp_library(
name="libtorch_cuda",
srcs=libtorch_cuda_sources,
link_whole=True,
propagated_pp_flags=[
"-DUSE_CUDA",
"-DUSE_DIRECT_NVRTC",
],
deps=[
":generated-autograd-headers",
":generated-autograd-headers-bare",
":generated-jit-headers",
":libtorch",
"//caffe2/aten:ATen",
"//caffe2/aten:generated-aten-headers-cuda",
"//caffe2/caffe2:caffe2_cpu",
"//caffe2/caffe2:caffe2_gpu",
"//caffe2/torch/lib/libshm:libshm",
],
external_deps=[
("cudnn", "7.1.2", "cudnn-lazy"),
("nccl", "2.1.15", "nccl-lazy"),
("cuda", None, "nvToolsExt-lazy"),
("cuda", None, "nvrtc-lazy"),
("cuda", None, "nvrtc-builtins-lazy"),
],
**common_flags
)
# TODO: split it into cpp and cuda parts similarly to libtorch
cpp_library(
name="_C_impl",
srcs=libtorch_python_sources,
link_whole=True,
deps=[
":libtorch_cuda",
":thnn",
"//caffe2/torch/lib/THD:THD",
"//caffe2/torch/lib/c10d:c10d",
"//caffe2/torch/lib/libshm:libshm",
],
external_deps=[
("numpy", None, "cpp"),
("pybind11", None),
("python", None),
],
**common_flags
)
cpp_python_extension(
name="_C",
srcs=[
"torch/csrc/stub.cpp",
],
base_module="torch",
deps=[":_C_impl"],
)
return r
| [
"[email protected]"
] | |
e02c5e9a1c49d874e1b079d301802f321137b37f | 06f379b97fd704c27f6fed584e1087f32a47fbfe | /mrjob/boto_2_1_1_83aae37b.py | 6962cd2d80afc125716dbecaeb0b95ad469080c2 | [
"Apache-2.0"
] | permissive | bchess/mrjob | 45c7a0a6693463d3a7462a9fec543b149092b747 | 7415f57884f2ba4313ef9164e023174d5f36abae | refs/heads/master | 2021-01-15T19:49:16.511468 | 2011-12-27T21:03:16 | 2011-12-27T21:03:16 | 2,647,531 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,326 | py | # Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Code from a bleeding-edge version of boto on github, copied here so that
mrjob can formally depend on a stable release of boto (in this case, 2.0).
This module will hopefully go away in mrjob v0.4.
Please don't make multiple boto_* modules; just bump the module name to
whatever version you need to work from, and re-copy the relevant code.
This is intentionally somewhat ugly and tedious; our goal is to check
the patches we need into boto as fast as we can, so that we don't need
to copy code from future versions of boto into mrjob.
"""
import types
import boto.emr.connection
import boto.emr.emrobject
from boto.emr.emrobject import RunJobFlowResponse
from boto.emr.step import JarStep
# add the AmiVersion field to JobFlow
class JobFlow(boto.emr.emrobject.JobFlow):
Fields = boto.emr.emrobject.JobFlow.Fields | set(['AmiVersion'])
# this is used in describe_jobflows(), below. We don't actually patch
# the code for describe_jobflows(); just by virtue of being in this module,
# it refers to the JobFlow class above rather than the one in boto.
# copied in run_jobflow() and supporting functions. This supports the
# additional_info, ami_version, and instance_groups keywords, which don't
# exist in boto 2.0, as well as disabling the HadoopVersion API parameter.
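#
# Hypothetical usage sketch (mrjob builds the connection itself; shown only to
# illustrate the extra keywords supported by this copy):
#
#   conn = EmrConnection(aws_access_key_id='...', aws_secret_access_key='...')
#   jobflow_id = conn.run_jobflow(name='my job', log_uri='s3://bucket/logs/',
#                                 ami_version='2.0', num_instances=2,
#                                 slave_instance_type='m1.small', steps=[...])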
class EmrConnection(boto.emr.connection.EmrConnection):
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: datetime
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
def run_jobflow(self, name, log_uri, ec2_keyname=None,
availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_version=None,
steps=[],
bootstrap_actions=[],
instance_groups=None,
additional_info=None,
ami_version=None):
"""
Runs a job flow
:type name: str
:param name: Name of the job flow
:type log_uri: str
:param log_uri: URI of the S3 bucket to place logs
:type ec2_keyname: str
:param ec2_keyname: EC2 key used for the instances
:type availability_zone: str
:param availability_zone: EC2 availability zone of the cluster
:type master_instance_type: str
:param master_instance_type: EC2 instance type of the master
:type slave_instance_type: str
:param slave_instance_type: EC2 instance type of the slave nodes
:type num_instances: int
:param num_instances: Number of instances in the Hadoop cluster
:type action_on_failure: str
:param action_on_failure: Action to take if a step terminates
:type keep_alive: bool
:param keep_alive: Denotes whether the cluster should stay
alive upon completion
:type enable_debugging: bool
:param enable_debugging: Denotes whether AWS console debugging
should be enabled.
:type hadoop_version: str
:param hadoop_version: Version of Hadoop to use. If ami_version
is not set, defaults to '0.20' for backwards compatibility
with older versions of boto.
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
:type bootstrap_actions: list(boto.emr.BootstrapAction)
:param bootstrap_actions: List of bootstrap actions that run
before Hadoop starts.
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: Optional list of instance groups to
use when creating this job.
NB: When provided, this argument supersedes num_instances
and master/slave_instance_type.
:type ami_version: str
:param ami_version: Amazon Machine Image (AMI) version to use
for instances. Values accepted by EMR are '1.0', '2.0', and
'latest'; EMR currently defaults to '1.0' if you don't set
'ami_version'.
:type additional_info: JSON str
:param additional_info: A JSON string for selecting additional features
:rtype: str
:return: The jobflow id
"""
# hadoop_version used to default to '0.20', but this won't work
        # on later AMI versions, so only default it if ami_version isn't set.
if not (hadoop_version or ami_version):
hadoop_version = '0.20'
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
params['Name'] = name
params['LogUri'] = log_uri
# Common instance args
common_params = self._build_instance_common_args(ec2_keyname,
availability_zone,
keep_alive,
hadoop_version)
params.update(common_params)
# NB: according to the AWS API's error message, we must
# "configure instances either using instance count, master and
# slave instance type or instance groups but not both."
#
# Thus we switch here on the truthiness of instance_groups.
if not instance_groups:
# Instance args (the common case)
instance_params = self._build_instance_count_and_type_args(
master_instance_type,
slave_instance_type,
num_instances)
params.update(instance_params)
else:
# Instance group args (for spot instances or a heterogenous cluster)
list_args = self._build_instance_group_list_args(instance_groups)
instance_params = dict(
('Instances.%s' % k, v) for k, v in list_args.iteritems()
)
params.update(instance_params)
# Debugging step from EMR API docs
if enable_debugging:
debugging_step = JarStep(name='Setup Hadoop Debugging',
action_on_failure='TERMINATE_JOB_FLOW',
main_class=None,
jar=self.DebuggingJar,
step_args=self.DebuggingArgs)
steps.insert(0, debugging_step)
# Step args
if steps:
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
if bootstrap_actions:
bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
params.update(self._build_bootstrap_action_list(bootstrap_action_args))
if ami_version:
params['AmiVersion'] = ami_version
if additional_info is not None:
params['AdditionalInfo'] = additional_info
response = self.get_object(
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
return response.jobflowid
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""
Takes a number of parameters used when starting a jobflow (as
specified in run_jobflow() above). Returns a comparable dict for
use in making a RunJobFlow request.
"""
params = {
'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
}
if hadoop_version:
params['Instances.HadoopVersion'] = hadoop_version
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
params['Instances.Placement.AvailabilityZone'] = availability_zone
return params
def _build_instance_count_and_type_args(self, master_instance_type,
slave_instance_type, num_instances):
"""
Takes a master instance type (string), a slave instance type
(string), and a number of instances. Returns a comparable dict
for use in making a RunJobFlow request.
"""
params = {
'Instances.MasterInstanceType' : master_instance_type,
'Instances.SlaveInstanceType' : slave_instance_type,
'Instances.InstanceCount' : num_instances,
}
return params
def _build_instance_group_args(self, instance_group):
"""
Takes an InstanceGroup; returns a dict that, when its keys are
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
"""
params = {
'InstanceCount' : instance_group.num_instances,
'InstanceRole' : instance_group.role,
'InstanceType' : instance_group.type,
'Name' : instance_group.name,
'Market' : instance_group.market
}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params
def _build_instance_group_list_args(self, instance_groups):
"""
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
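
        Illustrative result (assuming two groups): the returned dict holds
        keys such as 'InstanceGroups.member.1.InstanceRole' and
        'InstanceGroups.member.2.InstanceCount'; run_jobflow() above prefixes
        each key with 'Instances.' before making the request.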
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
ig_dict = self._build_instance_group_args(instance_group)
for key, value in ig_dict.iteritems():
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params
| [
"[email protected]"
] | |
ac95c0ed8b59e75ac4515c61349a08280c450c4c | 112aa4e67487571452477b0246a0061318065acf | /SDN_Python/SDN_Python/urls.py | 1fbaadc8ef9c51b1ec54bfc00aa480bc41453ff4 | [] | no_license | jayluxferro/OBP-SDN | a62779ab2670a6419cfe8632b46d6d27a0caf666 | 9cc5386573b270423bcf63aef93e58b6f49bc450 | refs/heads/master | 2023-03-23T14:08:44.303565 | 2021-03-22T10:11:38 | 2021-03-22T10:11:38 | 177,524,706 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | """SDN_Python URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from django.urls import path
import dashboard.urls
urlpatterns = [
#path('admin/', admin.site.urls),
url(r'^', include(dashboard.urls)),
]
| [
"[email protected]"
] | |
3aad3f01945c664308ccc3b561e0e56cfa002712 | d27af9d58b91b8cd998ac0eb87d980d304ff0670 | /Beginner-Contest/ABC102/ABC102_C.py | 8253d9718ee6884db84fd77f2eb490882b54cfce | [] | no_license | mongesan/Atcoder-m0_ngesan-py | 29dd79daab149003ffc8b6b6bad5fa2e7daa9646 | 6654af034d4ff4cece1be04c2c8b756976d99a4b | refs/heads/master | 2023-08-20T19:50:04.547025 | 2021-10-27T12:24:51 | 2021-10-27T12:24:51 | 258,486,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9 | py | #ABC102_C | [
"[email protected]"
] | |
0c6469587fb87fb9776a74d943a5d7a7ee89bd7e | 51e6234f683ed70207f53d6ee3f537c715082517 | /test/setup.py | d98509b0fedc1c07bf389086c75fd98e825bbd80 | [
"BSD-2-Clause"
] | permissive | ninjaaron/fast-entry_points | d13d36ee7c4d73a425fba1d0f167aba1e8970127 | a3a26f320c7ae2191fde71b79d4f4bf325d162f3 | refs/heads/master | 2021-10-09T16:16:14.618890 | 2021-10-07T11:55:03 | 2021-10-07T11:55:03 | 64,887,433 | 131 | 23 | BSD-2-Clause | 2021-06-21T19:49:44 | 2016-08-03T23:57:37 | Python | UTF-8 | Python | false | false | 236 | py | from setuptools import setup
import fastentrypoints
setup(
name='dummypkg',
version='0.0.0',
py_modules=['dummy'],
description='dummy package for the test',
entry_points={'console_scripts': ['hello=dummy:main']},
)
| [
"[email protected]"
] | |
371e51252c8dd93d6a036b76d919eb54d33bbba8 | 237162607427106ae9564670d47427a62356861f | /core/migrations/0153_divisionlocation.py | 87a4cdf31d325f60c95cbffe34744975f8ef282d | [] | no_license | pitipund/basecore | 8648c1f4fa37b6e6075fd710ca422fe159ba930e | a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b | refs/heads/master | 2020-09-13T20:16:02.622903 | 2019-11-20T09:07:15 | 2019-11-20T09:07:15 | 221,885,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-01-14 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0029_auto_20190114_1040'),
('core', '0152_language'),
]
operations = [
migrations.CreateModel(
name='DivisionLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('division', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Division')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Location')),
],
options={
'verbose_name': 'สถานที่ตั้งหน่วยงาน',
'verbose_name_plural': 'สถานที่ตั้งหน่วยงาน',
},
),
]
| [
"[email protected]"
] | |
15551af86e94ba02d1ac2f0711c56efd691bcc5b | 3ee04e8eef626c3d65f7b4ff218fbb01ba7dcff4 | /main/migrations/0010_auto_20180331_1204.py | a80cf707c589b92029eb2d2c4ac58bd6c57d808f | [] | no_license | David-OConnor/books | 44499fba804394187103567b021252ecff9b906c | 1c03b8c026de08eb1989e99171af01e7e8a7bbc9 | refs/heads/master | 2021-01-10T18:07:40.559040 | 2019-10-18T18:57:06 | 2019-10-18T18:57:06 | 43,619,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Generated by Django 2.0.3 on 2018-03-31 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20180331_1117'),
]
operations = [
migrations.CreateModel(
name='AdelaideWork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('author_first', models.CharField(max_length=50)),
('author_last', models.CharField(max_length=50)),
('translator', models.CharField(blank=True, max_length=100, null=True)),
('url', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.AlterUniqueTogether(
name='adelaidework',
unique_together={('author_last', 'title')},
),
]
| [
"[email protected]"
] | |
3b0dbff77c453d04d05c68a3fe87fc404a795510 | dc76018904675c64b6eb728d253a162802a584be | /urls.py | d4c2bf2fa81994fffdc6df56a2b8599172bf517e | [] | no_license | edb-gjengen/mbftns | ee36b0e28b5d8f0200b1407bb3940f220a75e553 | 0434c7ec16743467602481615ef1b87bf53df565 | refs/heads/master | 2021-01-20T20:53:17.284180 | 2012-09-05T12:18:29 | 2012-09-05T12:18:29 | 65,237,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'main.views.index', name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('main.urls')),
url(r'^accounts/login', 'django.contrib.auth.views.login'),
url(r'^accounts/logout', 'django.contrib.auth.views.logout'),
)
# for dev
#from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#urlpatterns += staticfiles_urlpatterns()
| [
"[email protected]"
] | |
838eda0275474ae5c472ea23d89a3ed57c710874 | 1a29735113eeb8061527c9e785fb3e16abe10449 | /lib/pymod/pymod/test/command/refresh.py | 1ba750637b0f0e168538bde48a54c2f2fd2ceed5 | [] | no_license | tjfulle/Modulecmd.py | db3fb96db63e42666056e8086f433a779f5bfc86 | 42e3d34b76a53f4ff557e96ba2af3cb83b963ad2 | refs/heads/master | 2023-02-21T10:16:49.408099 | 2021-11-18T06:29:59 | 2021-11-18T06:29:59 | 141,306,544 | 0 | 0 | null | 2019-05-09T04:51:09 | 2018-07-17T15:09:16 | Python | UTF-8 | Python | false | false | 739 | py | import pytest
import pymod.mc
from pymod.main import PymodCommand
@pytest.fixture()
def modules_path(tmpdir, namespace, modulecmds):
m = modulecmds
one = tmpdir.mkdir("1")
one.join("a.py").write(m.setenv("a"))
one.join("b.py").write(m.setenv("b"))
one.join("c.py").write(m.setenv("c"))
one.join("d.py").write(m.setenv("d"))
ns = namespace()
ns.one = one.strpath
return ns
@pytest.mark.unit
def test_command_refresh(modules_path, mock_modulepath):
load = PymodCommand("load")
refresh = PymodCommand("refresh")
mock_modulepath(modules_path.one)
load("a", "b", "c", "d")
refresh()
loaded = "".join(_.fullname for _ in pymod.mc.get_loaded_modules())
assert loaded == "abcd"
| [
"[email protected]"
] | |
cf7bf48f6a2df6d3cec9391c5bb31ea49634341b | c2fcc0709ed113037201c707fcebe298966e5694 | /tests/test_base.py | 5eec23115c64d8b527e308c49cb795e72b51b47e | [] | no_license | HyperSuprime-Cam/astshim | da1364f301847d8a2cdb6fad63e96aeb4780f694 | a72da6bbfa9d1fec5e5d87d1aa560234f2b95958 | refs/heads/master | 2022-04-03T16:35:25.002206 | 2020-01-23T20:12:51 | 2020-01-23T20:12:51 | 109,919,007 | 0 | 0 | null | 2017-11-08T02:46:38 | 2017-11-08T02:46:38 | null | UTF-8 | Python | false | false | 1,069 | py | import unittest
import numpy as np
from numpy.testing import assert_equal
import astshim as ast
from astshim.test import ObjectTestCase
class TestBase(ObjectTestCase):
def test_arrayFromVector(self):
nAxes = 3
nValues = 5
np.random.seed(1)
dataVec = np.random.rand(nAxes * nValues)
desiredDataArr = dataVec.copy()
desiredDataArr.shape = (nAxes, nValues)
dataArr = ast.arrayFromVector(vec=dataVec, nAxes=nAxes)
assert_equal(dataArr, desiredDataArr)
dataArr2 = ast.arrayFromVector(vec=list(dataVec), nAxes=nAxes)
assert_equal(dataArr2, desiredDataArr)
# make sure dataArr is a deep copy; changing dataVec should
# not change dataArr
dataVec[0] += 10
assert_equal(dataArr, desiredDataArr)
for delta in (-1, 1):
badDataVec = np.random.rand(nAxes * nValues + delta)
with self.assertRaises(RuntimeError):
ast.arrayFromVector(vec=badDataVec, nAxes=nAxes)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
bf41b56921afb1efed93ac1950ab83f8c7977aa1 | ec677ccd7d536b889260469f062f2c5db2f23a99 | /runoschool/runoschool/urls.py | e1a382bc28bf06185aebd89c7189c0676278de40 | [] | no_license | samuelatuma1/runo | fa93f523eaed6dc04068cddaf3d67fd908dfc7bf | 33b6c636e27b3157a7e0bee4a5e7dc76a58dd0e2 | refs/heads/master | 2023-07-14T12:43:38.284693 | 2021-08-22T16:56:48 | 2021-08-22T16:56:48 | 386,833,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | """runoschool URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('runo.urls', namespace='runo')),
# path('', include('django.contrib.auth.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
9d2ef5da9fc5d97e998e8ead3784d778354cd46f | b06978b6020ce3240912ba5c131c4f38a86d7996 | /Pycharm_files/Dictionaries/Chapter_5_reading.py | 5200f5c68a50cfdff5f9eff732463af79a87c2c7 | [] | no_license | mn4774jm/PycharmProjects | 95dc8ee6b89a85ba02d4134aa5b5bce11004647b | 886bcf2400abc9a1f797fe98d09241f99fa16322 | refs/heads/master | 2021-08-09T10:20:27.907847 | 2020-09-04T15:21:21 | 2020-09-04T15:21:21 | 219,878,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,906 | py | # # Birthdays.py
#
# birthdays = {'Alice':'Apr 1', 'Bob':'Dec 12', 'Carol':'Mar 4'}
#
# while True:
# print('Enter a name: (blank to quit)')
# name = input()
# if name =='':
# break
#
# if name in birthdays:
# print(birthdays[name] + ' is the birthday of '+ name)
# else:
# print('I do not have birthday information for '+name)
# print('What is their birthday?')
# bday = input()
# birthdays[name] = bday
# print('Birthday database updated.')
############################################
#using Data types in loops; .values(), .keys(), .items()
# spam = {'color': 'red', 'age': 42}
# #dict_keys
# # for k in spam.keys():
# # print(k)
#
# #dict_values
# # for v in spam.values():
# # print(v)
#
# #dict_items
# for i in spam.items():
# print(i)
#############################################
# #multiple assignment trick
# spam = {'color': 'red', 'age': 42}
# for k, v in spam.items():
# print('Key: ' + k + 'Value: ' + str(v))
#############################################
# #The get method; .get()
# #Because the value of cups in the dictionary is 2 it will be cups will print 2
# picnicItems = {'apples': 5, 'cups': 2}
# cups = 'I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.'
# print(cups)
#
# #because there is no key called eggs in the dictionary 0 will be printed
# eggs = 'I am bringing ' + str(picnicItems.get('eggs', 0)) + ' eggs.'
# print(eggs)
#############################################
#The setdefault() method
#used for setting value for a dictionary key whos value does not already exist
# spam = {'name': 'Pooka', 'age': 5}
# if 'color' not in spam:
# spam['color'] = 'black'
# print(spam)
# print(spam.keys())
#############################################
# #characterCount.py / prettyPrinting.py
# import pprint
# message = 'It was a bright cold day in April, and the clocks were striking thirteen'
# count = {}
#
# for character in message:
# count.setdefault(character,0)
# count[character] = count[character] +1
#
# print(pprint.pformat(count))
#############################################
#ticTacToe.py
# theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',
# 'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',
# 'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
#
# def printBoard(board):
# print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])
# print('-+-+-')
# print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])
# print('-+-+-')
# print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])
#
# turn = 'X'
# for i in range(9):
# printBoard(theBoard)
# print('Turn for '+turn+'. Move on which space?')
# move = input()
# theBoard[move] = turn
# if turn == 'X':
# turn = 'O'
# else:
# turn = 'X'
#
# printBoard(theBoard)
##############################################
#totalBought example; nested dictionary
# allGuests = {'Alice': {'apples': 5, 'pretzels': 12},
# 'Bob': {'ham sandwiches': 3, 'apples': 2},
# 'Carol': {'cups': 3, 'apple pies': 1}}
#
# #Inside the loop, the string of the guest's names is assigned to k,
# #and the dictionary of picnic items is assigned to v.
# def totalBrought(guests, item):
# numBrought = 0
# for k, v in guests.items():
# # if item is not present its value will default to 0
# numBrought = numBrought + v.get(item, 0)
# return numBrought
#
# print('Number of things being brought:')
# print(' - Apples ' + str(totalBrought(allGuests, 'apples')))
# print(' - Cups ' + str(totalBrought(allGuests, 'cups')))
# print(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))
# print(' - Ham Sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
# print(' - Apple Pies ' + str(totalBrought(allGuests, 'apple pies')))
| [
"[email protected]"
] | |
7642e99717c9f80209dd05eb0cc3bd3525ee7d19 | 92a1d7c2bb2119c67d9e33f1e48a1a02335772ce | /book/p8/8_10.py | 3519916dc5e72c9ccce016fab3c0190fe9f2d6f8 | [] | no_license | zephyr123/blibli | b615fb2ee2f1f98deaf5709640e18d3be1a656ac | d45ba1bcce66dc1df185a475abe01f744c128c1b | refs/heads/master | 2021-07-07T09:23:18.347107 | 2020-03-08T13:40:29 | 2020-03-08T13:40:29 | 244,829,368 | 0 | 0 | null | 2021-06-10T22:38:02 | 2020-03-04T06:55:15 | Python | UTF-8 | Python | false | false | 307 | py | def show_magicians(names):
for name in names:
print(name)
def make_great(names):
while names:
curr_names = "the Great " + names.pop()
mod_names.append(curr_names)
magic_names = ['liuqian','zhuxun','dongqing']
mod_names = []
make_great(magic_names)
show_magicians(mod_names) | [
"[email protected]"
] | |
e8cb02f30831c8e4ad17d9e2d6f87fb1386d7d12 | daee54824cb107f9b5749e3c12e7f09f544bac0e | /modules/vtk_basic/vtkJPEGWriter.py | cd7a99ec769de84b521d124207929c1fb3f9fdcc | [] | no_license | JoonVan/devide | 8fa556d2b42c5ad70c3595303253f2a171de0312 | 586225d68b079e2a96007bd33784113b3a19a538 | refs/heads/master | 2020-12-26T06:25:01.744966 | 2017-01-22T19:47:50 | 2017-01-22T19:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkJPEGWriter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkJPEGWriter(), 'Writing vtkJPEG.',
('vtkJPEG',), (),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| [
"[email protected]"
] | |
b17ffa7de9e79f3f88860099d16d5ecd324368a4 | 70f1c694bea6178c98b134b9c44952ef6693be9f | /Manuscript/figure/Figure_MITE_auto_promoter/Auto_target/scripts/make_activeTE-pep-msa-one.py | 5f10b1bc98f0a04409b2db8ab671d463341e6a2d | [] | no_license | JinfengChen/Rice_pop | 5c19c5837805e51ddb3b2ffba4baffdc59c9bfd3 | ef272bf4825b29610c94de55eb53f231fb5febc6 | refs/heads/master | 2020-04-07T04:55:36.606594 | 2018-03-02T16:52:53 | 2018-03-02T16:52:53 | 33,501,941 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | #!/usr/bin/env python
import sys
import os
import fnmatch
import os.path
import subprocess as subp
import fastaIO
args = sys.argv[1:]
def usage():
print """
Usage: make_activeTE-pep-msa.py <pep-cluster_MSA_folder> <match_pattern> <run_name> <found_superfamily_list>
"""
sys.exit(-1)
if (len(args) != 3 and len(args) != 4) or sys.argv[1] == '-h' or sys.argv[1] == '-help' or sys.argv[1] == '-H' or sys.argv[1] == '-Help' or sys.argv[1] == '--h' or sys.argv[1] == '--help':
usage()
top = '''#!/bin/bash
#PBS -l nodes=1:ppn=1,mem=8gb,walltime=08:00:00 -j oe
module load stajichlab
module load perl/5.16.3
module load fasta
module load trimal
cd $PBS_O_WORKDIR
'''
middle = '''perl /rhome/cjinfeng/software/tools/mTEA/scripts/activeTE_msa.pl -p -a -f 26 '''
files = os.listdir(sys.argv[1])
out_handle = open("aTE-pep_" + sys.argv[3] + ".sh", "w")
print >>out_handle, top
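# One activeTE_msa.pl invocation is appended per MSA file that matches the
# glob pattern, so the generated .sh file can be submitted as a single PBS job.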
for i in files:
if fnmatch.fnmatch(i, sys.argv[2]):
fpath = os.path.join(sys.argv[1], i)
if len(args) == 4:
full = middle + fpath + " " + sys.argv[4]
else:
full = middle + fpath
#out_handle = open("aTE-pep_" + sys.argv[3] + "_" + i + ".sh", "w")
print>>out_handle, full
print >>out_handle, '\n\necho "Done"'
out_handle.close()
| [
"[email protected]"
] | |
b02f08e27f8000cd103dda67c861f67cd6103769 | 77e303d8353170f4181ab9ff66ac77cb57d46caf | /src/508A.py | 4df01f6a789a6ea263d31dfc23439685b3bc3af6 | [
"MIT"
] | permissive | viing937/codeforces | 14f689f2e3360939912e927fb830c69f7116b35c | 5bd8c2bec0e48cb2b4830c26849ea7fda447267c | refs/heads/master | 2022-09-25T19:51:03.891702 | 2022-08-15T15:32:54 | 2022-08-15T15:32:54 | 32,905,529 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # coding: utf-8
n, m, k = [int(i) for i in input().split()]
mark = [[0 for i in range(m+2)] for j in range(n+2)]
for c in range(k):
i, j = [int(i) for i in input().split()]
mark[i][j] = 1
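    # the player who just painted (i, j) loses as soon as that cell completes
    # any of the four 2x2 blocks containing it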
if ( mark[i-1][j-1]==1 and mark[i][j-1]==1 and mark[i-1][j]==1 ) \
or ( mark[i][j-1]==1 and mark[i+1][j]==1 and mark[i+1][j-1]==1 ) \
or ( mark[i][j+1]==1 and mark[i-1][j]==1 and mark[i-1][j+1]==1 ) \
or ( mark[i][j+1]==1 and mark[i+1][j]==1 and mark[i+1][j+1]==1 ):
print(c+1)
break
else:
print(0)
| [
"[email protected]"
] | |
09529835a8153d35821c70fe6e90354fc9ab7438 | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-math/sum_of_square_numbers.py | 0b0dfde875e4a0b998dba553d721bfd19bf60025 | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 807 | py | # https://leetcode.com/problems/sum-of-square-numbers
# https://leetcode.com/problems/sum-of-square-numbers/solution
import math
class Solution:
# 90.88%
def judgeSquareSum(self, c):
if c < 0:
return False
if 0 == c:
return True
smaller, larger = 1, int(math.sqrt(c))
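        # Walk 'larger' down from floor(sqrt(c)); c is a sum of two squares
        # iff c - larger**2 is itself a perfect square for some such 'larger'.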
while smaller <= larger:
smaller = math.sqrt(c - larger ** 2)
if int(smaller) == smaller:
return True
larger -= 1
return False
s = Solution()
data = [(5, True),
(4, True),
(3, False),
(125, True),
(129, False),
]
for c, expected in data:
real = s.judgeSquareSum(c)
print('{}, expected {}, real {}, result {}'.format(c, expected, real, expected == real))
| [
"[email protected]"
] | |
adbb25263a000d69f883646bd0fbdb9d76a046b6 | cb10a56ab0515703bf65c5d9ab6e9c75b2e53031 | /src/images/training_images/try.py | d447233f1299786ae5526dc3aeb5b1f9382ae69a | [
"MIT"
] | permissive | JuanMorenoS/Captcha-With-Neuronal-Network | 2d09bc6e5ac308559aa7d8a0aa590b3c847e6022 | 3c4a119cb4df999011760caaa8f8271027de4897 | refs/heads/master | 2020-03-11T00:47:36.133301 | 2018-05-05T23:51:25 | 2018-05-05T23:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from PIL import Image
import pytesseract
import argparse
import cv2
import os
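# OCR each training image in turn; the whitelist restricts recognition to the
# hex-style alphabet (a-f, 0-9) these captchas are drawn from.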
for i in range(4746,10001):
    print(i, pytesseract.image_to_string(Image.open(str(i) + ".jpg"),
                                         config='-c tessedit_char_whitelist=abcdef0123456789'))
| [
"="
] | = |
f23f7227996acd01fd328809befc31c62faa0a5b | ea178f0977127189c7559dfa9ca2faadceef5ff8 | /python/jittor/test/test_new_fused_op.py | 32b9c488039e6b263c6ec0c9b443abf92f54ef20 | [
"Apache-2.0"
] | permissive | AbbasMZ/jittor | a0bb5b2cbceeffb40c61405b863e7e4b91567756 | fcec57f70422b52d6b8d0235e29f91fd2212f559 | refs/heads/master | 2023-06-20T07:07:22.952846 | 2021-07-15T14:40:54 | 2021-07-15T14:40:54 | 386,115,280 | 0 | 0 | Apache-2.0 | 2021-07-15T00:42:22 | 2021-07-15T00:39:53 | null | UTF-8 | Python | false | false | 1,451 | py | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Guowei Yang <[email protected]>
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import sys
import os
import jittor as jt
import unittest
import time
import numpy as np
from .test_log import find_log_with_re
class TestNewFuse(unittest.TestCase):
@classmethod
def setUpClass(self):
return
def check(self, h, w, cs, rs, pa, rtp, dim):
a = jt.random([h,w])
a.sync()
with jt.log_capture_scope(
log_v=0, log_vprefix="tuner_manager=100",
# this value is used for force compile
compile_options={"test_new_fused_op":1}
) as logs:
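            # standardise 'a' along 'dim': (a - mean) / sqrt(E[a^2] - mean^2),
            # built from broadcasted reductions so the reduce tuner can fuse
            # them (the captured logs are checked for that below)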
amean=jt.mean(a, dims=[dim], keepdims=1)
a2mean=jt.mean(a*a, dims=[dim], keepdims=1)
norm_aa=(a-amean.broadcast_var(a))/(jt.sqrt(a2mean-amean*amean).broadcast_var(a))
norm_aa.sync()
logs = find_log_with_re(logs,
"Run tuner reduce: confidence\\((.*)\\) candidates\\((.*)\\)$")
assert len(logs) == 3, logs
def test_new_fuse(self):
self.check(8192,8192, 0, 0, 0, 5, 0)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
cb05cede811123fa5e3a317f94586695225fe6ed | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/MadGraphModels/python/models/scalar_singlet_750_UFO/coupling_orders.py | fe4f223b9c8dbcde3bda5f37220247ddec40d888 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # This file was automatically created by FeynRules 2.3.7
# Mathematica version: 10.2.0 for Linux x86 (64-bit) (July 28, 2015)
# Date: Thu 26 Nov 2015 09:52:28
from object_library import all_orders, CouplingOrder
QCD = CouplingOrder(name = 'QCD',
expansion_order = 99,
hierarchy = 1)
QED = CouplingOrder(name = 'QED',
expansion_order = 99,
hierarchy = 2)
NP = CouplingOrder(name = 'NP',
expansion_order = 99,
hierarchy = 1)
| [
"[email protected]"
] | |
8dbc5c3a580ccb580ec81f61fc49b76815891337 | e7b312b4cc3355f4ca98313ef2ac9f3b0d81f245 | /kickstart/2018/round_c/a/make_large_input.py | 04519b5c74a2e0bdcab79f66b28555502a3fd222 | [] | no_license | minus9d/programming_contest_archive | 75466ab820e45ee0fcd829e6fac8ebc2accbbcff | 0cb9e709f40460305635ae4d46c8ddec1e86455e | refs/heads/master | 2023-02-16T18:08:42.579335 | 2023-02-11T14:10:49 | 2023-02-11T14:10:49 | 21,788,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
T = 100
print(T)
for t in range(T):
N = 1000
print(N)
used = set()
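    # every node n >= 2 is wired to a random earlier node m (keeping the graph
    # connected), and each iteration also emits one extra random edge that is
    # neither a self-loop nor a duplicate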
for n in range(2, N+1):
m = random.randint(1, n-1)
print("{} {}".format(n, m))
used.add((n, m))
used.add((m, n))
while True:
i = random.randint(1, N)
j = random.randint(1, N)
if i == j or (i, j) in used:
continue
else:
print("{} {}".format(i, j))
break
| [
"[email protected]"
] | |