Dataset schema (one record per source file; the `content` column holds the full file text, and ⌀ marks nullable columns):

| Column | Type | Lengths / values / range |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | sequence | lengths 1–1 |
| author_id | string | lengths 1–132 |
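
Each record below follows this schema, with the full source file stored in the `content` field. For illustration only, a split like this could be loaded with the Hugging Face `datasets` library (the dataset path below is a placeholder, not the real identifier):

```python
from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset path for this card.
ds = load_dataset("org/source-code-dataset", split="train", streaming=True)

# Stream a few records instead of downloading the whole split.
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```
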
bb73179a28bb142c274961bea46d714b9cd87d26 | 923a3f7be34e10931936823df0740d5d845d26e5 | /Courses/MCCT2009/Intro/execute_request.py | ff0b0046dc6b4bed302c6269c14964844dc8cb8d | [] | no_license | o-smirnov/public-documents | 0572ccef548a321e70b8cad2e2f2c249926f017d | 9e758ddf375c0f748376d2e37d0fea9661ed7c37 | refs/heads/master | 2021-01-15T23:40:03.183127 | 2015-06-04T17:18:22 | 2015-06-04T17:18:22 | 31,723,928 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | # file: ../beginners_guide/execute_request.py
from Timba.TDL import *
from Timba.Meq import meq
request_counter = 0
#------------------------------------------------------------------------
TDLRuntimeMenu("Parameters of the Request domain:",
TDLOption('ropt_num_freq', 'nr of freq channels',
[10,11,1,20,50,100], more=int,
doc='nr of domain cells in the freq direction'),
TDLOption('ropt_num_time', 'nr of time channels',
[11,10,1,20,50,100], more=int,
doc='nr of domain cells in the time direction'),
TDLMenu("time size:",
TDLOption('ropt_t1', 'start time (s)',
[1.0,0.0,-1.0], more=float,
doc='min time (s) of the domain (edge)'),
TDLOption('ropt_t2', 'stop time (s)',
[10.0,1.0], more=float,
doc='max time (s) of the domain (edge)'),
),
TDLMenu("freq size:",
TDLOption('ropt_f1', 'start freq (Hz)',
[1.0], more=float,
doc='min freq (Hz) of the domain (edge)'),
TDLOption('ropt_f2', 'stop freq (Hz)',
[11.0], more=float,
doc='max freq (Hz) of the domain (edge)'),
)
)
#------------------------------------------------------------------------
def execute_request (mqs, node,
# f1=None, f2=None, t1=None, t2=None,
# num_freq=None, num_time=None,
freq_offset=0.0, time_offset=0.0,
parent=None, trace=False):
"""
Execute the given node with the specified time-freq domain (size and cells)
The (optional) freq and time offsets are fractions of the domain size.
"""
foffset = (ropt_f1-ropt_f2)*freq_offset
toffset = (ropt_t1-ropt_t2)*time_offset
domain = meq.domain(ropt_f1+foffset, ropt_f2+foffset,
ropt_t1+toffset, ropt_t2+toffset)
cells = meq.cells(domain, num_freq=ropt_num_freq,
num_time=ropt_num_time)
global request_counter
request_counter += 1
rqid = meq.requestid(request_counter)
request = meq.request(cells, rqid=rqid)
result = mqs.execute(node, request)
return None
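# Note: with the default runtime options above (ropt_f1=1.0, ropt_f2=11.0),
# freq_offset=0.5 gives foffset = (1.0 - 11.0) * 0.5 = -5.0, so the [1, 11] Hz
# domain shifts to [-4, 6] Hz; time_offset scales the [t1, t2] interval the same way.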
#------------------------------------------------------------------------
| [
"[email protected]"
] | |
5d6c44da5a001474f63de5209baa264dfce32af8 | 353626e216085601f8be641be4c775a563fdc95e | /word_discovery.py | c4428bb88d5996cf78f774e79fa5218f517f8dff | [] | no_license | houking-can/english-word-discovery | 148729a4cda980b1b6a67ef49e6a934ffc53ac90 | 1c6276642ac360f411c9841daa2edd4bc4b959d2 | refs/heads/master | 2020-09-24T18:59:44.044460 | 2019-12-04T08:56:40 | 2019-12-04T08:56:40 | 225,821,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,307 | py | # ! -*- coding: utf-8 -*-
import struct
import os
import math
import logging
from collections import Counter
logging.basicConfig(level=logging.INFO, format=u'%(asctime)s - %(levelname)s - %(message)s')
from tqdm import tqdm
class KenlmNgrams:
"""加载Kenlm的ngram统计结果
vocab_file: Kenlm统计出来的词(字)表;
ngram_file: Kenlm统计出来的ngram表;
order: 统计ngram时设置的n,必须跟ngram_file对应;
min_count: 自行设置的截断频数。
"""
def __init__(self, vocab_file, ngram_file, order, min_count):
self.vocab_file = vocab_file
self.ngram_file = ngram_file
self.order = order
self.min_count = min_count
self.read_chars()
self.read_ngrams()
def read_chars(self):
f = open(self.vocab_file, encoding='utf-8')
chars = f.read()
f.close()
chars = chars.split('\x00')
self.chars = chars
def read_ngrams(self):
"""读取思路参考https://github.com/kpu/kenlm/issues/201
"""
self.ngrams = [{} for _ in range(self.order)]
self.total = 0
size_per_item = self.order * 4 + 8
f = open(self.ngram_file, 'rb')
filedata = f.read()
filesize = f.tell()
f.close()
for i in tqdm(range(0, filesize, size_per_item)):
s = filedata[i: i + size_per_item]
n = self.unpack('l', s[-8:])
if n >= self.min_count:
self.total += n
c = [self.unpack('i', s[j * 4: (j + 1) * 4]) for j in range(self.order)]
c = [self.chars[j] for j in c if j > 2]
for j in range(len(c)):
self.ngrams[j][' '.join(c[:j + 1])] = self.ngrams[j].get(' '.join(c[:j + 1]), 0) + n
def unpack(self, t, s):
return struct.unpack(t, s)[0]
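# Usage sketch (illustration only; mirrors the pipeline at the bottom of this file):
#   ngrams = KenlmNgrams('scierc.chars', 'scierc.ngrams', order=6, min_count=4)
#   ngrams.ngrams[0]   # unigram counts, ngrams.ngrams[1] bigram counts, ...
#   ngrams.total       # total frequency mass of the retained ngrams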
def write_corpus(texts, corpus):
"""将语料写到文件中,词与词(字与字)之间用空格隔开
"""
print('exporting corpus...')
with open(corpus, 'w', encoding='utf-8') as f:
for s in texts:
f.write(s)
def write_vocab(corpus, vocab):
print('writing vocab...')
tmp = open(corpus, encoding='utf-8').read().split()
words = []
for w in tqdm(tmp):
        w = w.strip(".,()'?!")  # strip common punctuation from both ends
words.append(w)
words = list(Counter(words).items())
words.sort(key=lambda k: k[1], reverse=True)
with open(vocab, 'w', encoding='utf-8') as f:
for w in words:
# f.write(w[0] + ' ' + str(w[1])+'\n')
f.write(w[0] + '\n')
def count_ngrams(corpus_file, order, vocab_file, ngram_file):
"""通过os.system调用Kenlm的count_ngrams来统计频数
"""
return os.system(
'/home/yhj/paper/ijcai-2020/kenlm/build/bin/count_ngrams -o %s --write_vocab_list %s <%s >%s'
% (order, vocab_file, corpus_file, ngram_file)
)
def filter_ngrams(ngrams, total, min_pmi=1):
"""通过互信息过滤ngrams,只保留“结实”的ngram。
"""
order = len(ngrams)
if hasattr(min_pmi, '__iter__'):
min_pmi = list(min_pmi)
else:
min_pmi = [min_pmi] * order
output_ngrams = set()
total = float(total)
for i in range(order - 1, 0, -1):
print('order: ', i)
for w, v in tqdm(ngrams[i].items()):
w = w.split(' ')
pmi = min([
total * v / (ngrams[j].get(' '.join(w[:j + 1]), total) * ngrams[i - j - 1].get(' '.join(w[j + 1:]),
total))
for j in range(i)
])
if math.log(pmi) >= min_pmi[i]:
output_ngrams.add(' '.join(w))
return output_ngrams
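# Note on the score above: it is a pointwise-mutual-information style ratio.  For every split of
# an ngram w into a left part a and a right part b,
#     pmi(a, b) ~ total * count(ab) / (count(a) * count(b))
# and w is kept only when log(pmi) at its weakest split point still reaches the threshold for
# that order (min_pmi[i]).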
class SimpleTrie:
"""通过Trie树结构,来搜索ngrams组成的连续片段
"""
def __init__(self):
self.dic = {}
self.end = True
def add_word(self, word):
_ = self.dic
for c in word:
if c not in _:
_[c] = {}
_ = _[c]
_[self.end] = word
    def tokenize(self, sent):  # segment the sentence by chaining the longest ngram matches
result = []
start, end = 0, 1
for i, c1 in tqdm(enumerate(sent), total=len(sent)):
_ = self.dic
if i == end:
result.append(sent[start: end])
start, end = i, i + 1
for j, c2 in enumerate(sent[i:]):
if c2 in _:
_ = _[c2]
if self.end in _:
if i + j + 1 > end:
end = i + j + 1
else:
break
result.append(sent[start: end])
return result
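# Usage sketch (illustration only): overlapping ngrams are chained into maximal segments.
#   trie = SimpleTrie()
#   for w in ('ab', 'abc', 'cd'):
#       trie.add_word(w)
#   trie.tokenize('abcde')  # -> ['abcd', 'e'] ('abc' and 'cd' overlap, so they merge)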
def filter_vocab(candidates, ngrams, order):
"""通过与ngrams对比,排除可能出来的不牢固的词汇(回溯)
"""
result = {}
for i, j in candidates.items():
if len(i) < 3:
result[i] = j
elif len(i) <= order and i in ngrams:
result[i] = j
elif len(i) > order:
flag = True
for k in range(len(i) + 1 - order):
if i[k: k + order] not in ngrams:
flag = False
if flag:
result[i] = j
return result
# ======= The algorithm is defined above; the full vocabulary-building pipeline runs below =======
import re
import glob
import json
# Corpus generator that also does some light preprocessing of the corpus
# The details of this particular generator are unimportant; it simply yields the text chunk by chunk
def text_generator():
txts = [os.path.join('./data', each) for each in os.listdir('./data')]
for txt in txts:
d = open(txt, encoding='utf-8').read()
d = d.split('\n')
res = ''
for line in d:
if '\t' in line:
line = line.split('\t')[1]
line.rstrip('.')
line.strip()
res += line + ' '
yield res
min_count = 4
order = 6
corpus_file = 'scierc.corpus'  # file for the exported corpus
vocab_file = 'scierc.vocab'  # file for the vocabulary
ngram_file = 'scierc.ngrams'  # file for the ngram set
output_file = 'scierc.phrase'  # file for the final exported phrase list
chars_file = 'scierc.chars'
ngrams_json = 'scierc.ngrams.json'
# write_corpus(text_generator(), corpus_file)  # dump the corpus to a text file
# write_vocab(corpus_file, vocab_file)
# count_ngrams(corpus_file, order, chars_file, ngram_file)  # count ngrams with KenLM
# ngrams = KenlmNgrams(chars_file, ngram_file, order, min_count)
# json.dump({'ngrams': ngrams.ngrams, 'total': ngrams.total}, open(ngrams_json, 'w'), indent=4)
ngrams = json.load(open(ngrams_json))
ngrams = filter_ngrams(ngrams['ngrams'], ngrams['total'], [0, 2, 4, 6, 8, 10])  # filter ngrams by PMI
ngrams = list(ngrams)
ngrams.sort(key=lambda k: (k, len(k)))
with open(output_file, 'w') as f:
f.write('\n'.join(ngrams))
# ngtrie = SimpleTrie() # build a trie over the retained ngrams
# print('build ngram trie...')
# for w in tqdm(ngrams):
# _ = ngtrie.add_word(w)
#
# candidates = {} # collect candidate words
# print('discovering words...')
# txts = [os.path.join('./data', each) for each in os.listdir('./data')]
# for txt in txts:
# d = open(txt, encoding='utf-8').read()
# d = d.replace(u'\u3000', ' ').strip()
# d = re.sub(u'[^\u4e00-\u9fa50-9a-zA-Z ]+', '\n', d)
# print(txt, 'tokenizing...')
# tokens = ngtrie.tokenize(d)
# print(txt, 'gen candidates...')
# for w in tqdm(tokens): # pre-segmentation counts
# candidates[w] = candidates.get(w, 0) + 1
#
# # frequency filtering
# candidates = {i: j for i, j in candidates.items() if j >= min_count}
# # mutual-information filtering (backtracking)
# candidates = filter_vocab(candidates, ngrams, order)
#
# # write the result file
# with open(output_file, 'w', encoding='utf-8') as f:
# for i, j in sorted(candidates.items(), key=lambda s: -s[1]):
# s = '%s %s\n' % (i, j)
# f.write(s)
| [
"[email protected]"
] | |
4890cec1b8a3eaa8cba2cc2ab2ac83c56273c669 | 9d4a03990d94e9fb0248ec94875376c82139891a | /Python实战:四周实现爬虫系统/week_2/2_1/coed_of_video_test.py | 3c7ec1007817205d0834c78e477ba2709266b5bf | [] | no_license | wenhaoliang/learn-python | 114744f3c94859f665a998b03c6d5f5d908fb94d | bd31daa34cc79f3171a2e195c149af60a7e0ebed | refs/heads/master | 2020-04-16T02:14:36.345629 | 2019-07-06T12:26:23 | 2019-07-06T12:26:23 | 58,053,238 | 7 | 7 | null | 2017-07-21T09:35:55 | 2016-05-04T13:14:26 | Python | UTF-8 | Python | false | false | 574 | py | import pymongo
client = pymongo.MongoClient('localhost',27017)
walden = client['walden']
sheet_ta = walden['sheet_ta']
path = 'walden.txt'
# with open(path,'r') as f:
# lines = f.readlines()
# for index,line in enumerate(lines):
# data = {
# '序列':index,
# '句子' :line,
# '单词数量':len(line.split())
# }
# sheet_ta.insert_one(data)
# $lt/$lte/$gt/$gte/$ne are equivalent to < / <= / > / >= / != respectively (l = less, g = greater, e = equal, n = not)
for item in sheet_ta.find():
print(item)
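# For illustration, the same operators work as query filters, e.g. only sentences with more
# than 5 words (field names follow the commented insert code above):
#   for item in sheet_ta.find({'单词数量': {'$gt': 5}}):
#       print(item)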
| [
"[email protected]"
] | |
9aab9e36ef99567c70e4715d279ae9ef813a672e | 04d8f0b5a291ec6c3470f4498dd64ab9c1845f96 | /library/built-in/database/dbm/ex.py | e2347d6fe6f3c862de6cdd12869a7f251d5b925c | [] | no_license | volitilov/Python_learn | 8c0f54d89e0ead964320d17eeddeacd5b704b717 | f89e52655f83a9f1105689f0302ef5b0ee30a25c | refs/heads/master | 2022-01-10T13:39:59.237716 | 2019-07-17T11:39:10 | 2019-07-17T11:39:10 | 70,601,503 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import dbm
# :::::::::::::::::::::::::::::::::::::::::::::::::::
db = dbm.open('test_db', 'c')
db['color'] = 'yellow'
db['base'] = 'postgresql'
# print(len(db)) # 2
# print(db['base']) # b'postgresql'
db.close() | [
"[email protected]"
] | |
ab00117ee31dc1546140039d63966c6b83f2cafc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00001/s250444754.py | 9c0e0cef29bc195aedb50f3701974ecffe83e6bb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | h=[]
for i in range(10):
    h.append(int(input()))
h.sort()
h.reverse()
for i in range(3):
print(h[i])
| [
"[email protected]"
] | |
b3a5bd338a2ef2f1cffb1d0d5665b8859fd3b5f5 | d785e993ed65049c82607a1482b45bddb2a03dda | /nano2017/cfg2018/WZTo3LNu_0Jets_MLL-4to50_cfg.py | 78a3135cc48d53686e3d778eb84aa962dc40c43e | [] | no_license | PKUHEPEWK/ssww | eec02ad7650014646e1bcb0e8787cf1514aaceca | a507a289935b51b8abf819b1b4b05476a05720dc | refs/heads/master | 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
config.section_("General")
config.General.requestName = 'WZTo3LNu_0Jets_MLL-4to50_2018'
config.General.transferLogs= False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script_2018.sh'
config.JobType.inputFiles = ['crab_script_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/WZTo3LNu_0Jets_MLL-4to50_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'WZTo3LNu_0Jets_MLL-4to50_2018'
config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
| [
"[email protected]"
] | |
b7a171a8ca7594633d34875c4d50705544339839 | 3b13020b492003912b2da62ff29a00e584a63766 | /examples/porta.py | 0dd8608dd36b1443ee39805620dc2511b8d38284 | [
"MIT"
] | permissive | tigertv/secretpy | c0d62a2934fa5ac1e07f1c848429fc062e2f2976 | e464f998e5540f52e269fe360ec9d3a08e976b2e | refs/heads/master | 2021-08-16T00:05:53.089587 | 2021-08-09T23:55:42 | 2021-08-09T23:58:09 | 147,110,283 | 65 | 15 | MIT | 2021-05-28T16:49:09 | 2018-09-02T18:15:14 | Python | UTF-8 | Python | false | false | 1,755 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import Porta, CryptMachine, alphabets as al
from secretpy.cmdecorators import UpperCase, Block, SaveAll
alphabet = al.GERMAN
plaintext = u"schweißgequältvomödentextzürnttypografjakob"
key = u"schlüssel"
cipher = Porta()
print(plaintext)
enc = cipher.encrypt(plaintext, key, alphabet)
print(enc)
dec = cipher.decrypt(enc, key, alphabet)
print(dec)
#######################################################
def encdec(machine, plaintext):
print("--------------------------------------------------------------------")
print(plaintext)
enc = machine.encrypt(plaintext)
print(enc)
print(machine.decrypt(enc))
cm0 = CryptMachine(cipher, key)
cm = cm0
cm.set_alphabet(al.ENGLISH)
cm.set_key("keys")
plaintext = "I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!"
encdec(cm, plaintext)
cm = Block(cm, length=4, sep="::")
plaintext = "This text is divided by blocks of length 5!"
encdec(cm, plaintext)
cm = SaveAll(cm0)
plaintext = "I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!"
encdec(cm, plaintext)
cm.set_alphabet(al.ENGLISH_SQUARE_IJ)
plaintext = "Jj becomes Ii because we use ENGLISH_SQUARE_IJ!"
encdec(cm, plaintext)
cm.set_alphabet(al.JAPANESE_HIRAGANA)
cm.set_key(u"かぎ")
plaintext = u"text いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす !"
encdec(cm, plaintext)
cm = UpperCase(cm)
alphabet = al.GREEK
cm.set_alphabet(alphabet)
cm.set_key(u"κλειδί")
plaintext = u"Θέλει αρετή και τόλμη η ελευθερία. (Ανδρέας Κάλβος)"
encdec(cm, plaintext)
'''
'''
| [
"[email protected]"
] | |
3b22ec060a5ee9a4d7e472fcd58efc85a0fa7166 | 05caf48bd067c050666026b75686f23d02327378 | /190.reverse-bits.py | 0354dd2a79eead8c8799d5b9bbb18ef9c64541ca | [
"MIT"
] | permissive | elfgzp/Leetcode | 3b6fa307c699fd5a1ba5ea88988c324c33a83eb7 | 964c6574d310a9a6c486bf638487fd2f72b83b3f | refs/heads/master | 2023-08-21T23:11:38.265884 | 2020-10-17T11:55:45 | 2020-10-17T11:55:45 | 168,635,331 | 3 | 0 | MIT | 2023-07-21T03:50:43 | 2019-02-01T03:14:49 | Python | UTF-8 | Python | false | false | 1,718 | py | #
# @lc app=leetcode.cn id=190 lang=python
#
# [190] Reverse Bits
#
# https://leetcode-cn.com/problems/reverse-bits/description/
#
# algorithms
# Easy (34.07%)
# Total Accepted: 10.9K
# Total Submissions: 28.7K
# Testcase Example: '00000010100101000001111010011100'
#
# Reverse the bits of a given 32-bit unsigned integer.
#
#
# Example 1:
#
# Input: 00000010100101000001111010011100
# Output: 00111001011110000010100101000000
# Explanation: the input binary string 00000010100101000001111010011100 represents the unsigned
# integer 43261596, so the answer is 964176192, whose binary representation is
# 00111001011110000010100101000000.
#
# Example 2:
#
# Input: 11111111111111111111111111111101
# Output: 10111111111111111111111111111111
# Explanation: the input binary string 11111111111111111111111111111101 represents the unsigned
# integer 4294967293, so the answer is 3221225471, whose binary representation is
# 10111111111111111111111111111111.
#
#
# Note:
#
# In some languages (such as Java) there is no unsigned integer type. In that case both input and
# output are given as signed integers; this should not affect your implementation, because the
# internal binary representation is the same whether the integer is signed or unsigned.
# In Java the compiler represents signed integers in two's complement notation. Therefore, in
# Example 2 above, the input represents the signed integer -3 and the output represents the
# signed integer -1073741825.
#
#
# Follow-up:
# If this function is called many times, how would you optimize your algorithm?
#
#
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
bits = "{:0>32b}".format(n)
return int(bits[::-1], 2)
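# One possible answer to the follow-up above (sketch only): precompute a 256-entry table of
# reversed bytes once, then assemble the 32-bit result from four table lookups:
#
#   TABLE = [int('{:08b}'.format(b)[::-1], 2) for b in range(256)]
#
#   def reverse_bits_fast(n):
#       return (TABLE[n & 0xff] << 24) | (TABLE[(n >> 8) & 0xff] << 16) | \
#              (TABLE[(n >> 16) & 0xff] << 8) | TABLE[(n >> 24) & 0xff]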
| [
"[email protected]"
] | |
28a5e30cebf3bae89a61faf4acd46d0b226d4ceb | 8567438779e6af0754620a25d379c348e4cd5a5d | /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py | 9f7c39861ca14381afdc1e8db9e3c93347493dcf | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | thngkaiyuan/chromium | c389ac4b50ccba28ee077cbf6115c41b547955ae | dab56a4a71f87f64ecc0044e97b4a8f247787a68 | refs/heads/master | 2022-11-10T02:50:29.326119 | 2017-04-08T12:28:57 | 2017-04-08T12:28:57 | 84,073,924 | 0 | 1 | BSD-3-Clause | 2022-10-25T19:47:15 | 2017-03-06T13:04:15 | null | UTF-8 | Python | false | false | 8,191 | py | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationLine
from webkitpy.common.net.layout_test_results import LayoutTestResults
class BuildBotPrinter(object):
# This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
# Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
# and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
def __init__(self, stream, debug_logging):
self.stream = stream
self.debug_logging = debug_logging
def print_results(self, run_details):
if self.debug_logging:
self.print_run_results(run_details.initial_results)
self.print_unexpected_results(run_details.summarized_full_results, run_details.enabled_pixel_tests_in_retry)
def _print(self, msg):
self.stream.write(msg + '\n')
def print_run_results(self, run_results):
failed = run_results.total_failures
total = run_results.total
passed = total - failed - run_results.remaining
percent_passed = 0.0
if total > 0:
percent_passed = float(passed) * 100 / total
self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
self._print("")
self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
self._print("")
# FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
self._print_run_results_entry(run_results, test_expectations.WONTFIX,
"Tests that will only be fixed if they crash (WONTFIX)")
self._print("")
def _print_run_results_entry(self, run_results, timeline, heading):
total = len(run_results.tests_by_timeline[timeline])
not_passing = (total -
len(run_results.tests_by_expectation[test_expectations.PASS] &
run_results.tests_by_timeline[timeline]))
self._print("=> %s (%d):" % (heading, not_passing))
for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys():
if result in (test_expectations.PASS, test_expectations.SKIP):
continue
results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
if not_passing and len(results):
pct = len(results) * 100.0 / not_passing
self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
passes = {}
flaky = {}
regressions = {}
def add_to_dict_of_lists(dict, key, value):
dict.setdefault(key, []).append(value)
def add_result(result):
test = result.test_name()
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
if result.did_run_as_expected():
# Don't print anything for tests that ran as expected.
return
if actual == ['PASS']:
if 'CRASH' in expected:
add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
elif 'TIMEOUT' in expected:
add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
else:
add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
add_to_dict_of_lists(regressions, actual[0], test)
elif len(actual) > 1 and bool(set(actual[1:]) & set(expected)):
# We group flaky tests by the first actual result we got.
add_to_dict_of_lists(flaky, actual[0], test)
else:
add_to_dict_of_lists(regressions, actual[0], test)
test_results = LayoutTestResults(summarized_results)
test_results.for_each_test(add_result)
if len(passes) or len(flaky) or len(regressions):
self._print("")
if len(passes):
for key, tests in passes.iteritems():
self._print("%s: (%d)" % (key, len(tests)))
tests.sort()
for test in tests:
self._print(" %s" % test)
self._print("")
self._print("")
if len(flaky):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in flaky.iteritems():
result_type = TestExpectations.EXPECTATIONS[key.lower()]
self._print("Unexpected flakiness: %s (%d)" % (descriptions[result_type], len(tests)))
tests.sort()
for test in tests:
result = test_results.result_for_test(test)
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
# FIXME: clean this up once the old syntax is gone
new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp]
for exp in list(set(actual) | set(expected))]
self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
self._print("")
self._print("")
if len(regressions):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in regressions.iteritems():
result_type = TestExpectations.EXPECTATIONS[key.lower()]
self._print("Regressions: Unexpected %s (%d)" % (descriptions[result_type], len(tests)))
tests.sort()
for test in tests:
result = test_results.result_for_test(test)
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp] for exp in actual]
self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
self._print("")
if len(summarized_results['tests']) and self.debug_logging:
self._print("%s" % ("-" * 78))
| [
"[email protected]"
] | |
ebc6bcb0e32507f21be5c31bd75c0749d8cfa2a2 | d7532e2ac4983c042f50525aab564597db154719 | /day2/strings_2/5.py | 28eea9134e6d119f85cd7e9ebcb7e881cc234697 | [] | no_license | shobhit-nigam/qti_panda | d53195def05605ede24a5108de1dbfbe56cbffe7 | 35d52def5d8ef1874e795a407768fd4a02834418 | refs/heads/main | 2023-08-24T14:56:34.934694 | 2021-10-22T09:59:05 | 2021-10-22T09:59:05 | 418,381,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | spain = [34, 67, 12, 55, 89]
sep = '__'
strx = sep.join(str(x) for x in spain)
print(strx)
# another way
# list comprehensions
| [
"[email protected]"
] | |
67b64deafbb427a7ebd867ae2548343252614cb8 | 13ea6fa027c8ae33852bde3335846cdaab78ee71 | /DataScienceWithPython/sample_python_code/ml/supervised/MNIST-knn.py | 2b9c0d32ff6783e60788238adaea7dd368a1f00c | [] | no_license | dmonisankar/pythonworks | c98de04b191135451556ca9d1ee513a0a69f2edb | 4f3a14460272ec959c2f2e6975814d9ac43cb90a | refs/heads/master | 2023-03-31T00:36:46.016403 | 2020-06-11T05:39:36 | 2020-06-11T05:39:36 | 271,455,493 | 0 | 0 | null | 2021-03-20T04:19:45 | 2020-06-11T05:00:00 | Jupyter Notebook | UTF-8 | Python | false | false | 3,736 | py | # Train/Test Split + Fit/Predict/Accuracy
# Now that you have learned about the importance of splitting your data into training and test sets, it's time to practice doing this on the digits dataset! After creating arrays for the features and target variable, you will split them into training and test sets, fit a k-NN classifier to the training data, and then compute its accuracy using the .score() method.
# Instructions
# Import KNeighborsClassifier from sklearn.neighbors and train_test_split from sklearn.model_selection.
# Create an array for the features using digits.data and an array for the target using digits.target.
# Create stratified training and test sets using 0.2 for the size of the test set. Use a random state of 42. Stratify the split according to the labels so that they are distributed in the training and test sets as they are in the original dataset.
# Create a k-NN classifier with 7 neighbors and fit it to the training data.
# Compute and print the accuracy of the classifier's predictions using the .score() method.
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
digits = datasets.load_digits()
# Create feature and target arrays
X = digits.data
y = digits.target
# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=y)
# Create a k-NN classifier with 7 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=7)
# Fit the classifier to the training data
knn.fit(X_train,y_train)
# Print the accuracy
print(knn.score(X_test, y_test))
# -------------------------------------------------------------------------------------------------------------------------------
# Remember the model complexity curve that Hugo showed in the video? You will now construct such a curve for the digits dataset! In this exercise, you will compute and plot the training and testing accuracy scores for a variety of different neighbor values. By observing how the accuracy scores differ for the training and testing sets with different values of k, you will develop your intuition for overfitting and underfitting.
# The training and testing sets are available to you in the workspace as X_train, X_test, y_train, y_test. In addition, KNeighborsClassifier has been imported from sklearn.neighbors.
# Instructions
# Inside the for loop:
# Setup a k-NN classifier with the number of neighbors equal to k.
# Fit the classifier with k neighbors to the training data.
# Compute accuracy scores the training set and test set separately using the .score() method and assign the results to the train_accuracy and test_accuracy arrays respectively.
# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train,y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show() | [
"[email protected]"
] | |
78d27f7b1092241934f1510f40ad8bfc3ece1523 | 8be2c3a2ee48b004f5894899f5b06d2c8a91d044 | /1290. Convert Binary Number in a Linked List to Integer.py | ae85f70de8e54ae6a54c9caa78f230b822a70c87 | [] | no_license | themockingjester/leetcode-python- | 8ea8caf047b4ad2ebc63d98278d96f0bdd788a34 | eda7d6d1d1860c4382b20acfb69e03c648845e72 | refs/heads/main | 2023-07-07T10:02:45.796512 | 2021-08-11T03:53:42 | 2021-08-11T03:53:42 | 337,762,767 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def check(self,root):
if root:
self.s+=str(root.val)
self.check(root.next)
def getDecimalValue(self, head: ListNode) -> int:
if head:
self.s = ""
self.check(head)
print(self.s)
return int(self.s,2)
| [
"[email protected]"
] | |
288debbd2d6ead371f0da1214457fd35434bc1eb | 51d0377511a5da902033fb9d80184db0e096fe2c | /10-merging-dataframes-with-pandas/3-merging-data/05-merging-dataframes-with-outer-join.py | 7a31458d2aa2968025b9b9bae577f0e16510903a | [] | no_license | sashakrasnov/datacamp | c28c6bda178163337baed646220b2f7dcc36047d | 759f4cec297883907e21118f24a3449d84c80761 | refs/heads/master | 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,734 | py | '''
Merging DataFrames with outer join
This exercise picks up where the previous one left off. The DataFrames revenue, managers, and sales are pre-loaded into your namespace (and, of course, pandas is imported as pd). Moreover, the merged DataFrames revenue_and_sales and sales_and_managers have been pre-computed exactly as you did in the previous exercise.
The merged DataFrames contain enough information to construct a DataFrame with 5 rows, with all known information correctly aligned and each branch listed only once. You will try to merge the merged DataFrames on all matching keys (which computes an inner join by default). You can compare the result to an outer join and also to an outer join with a restricted subset of columns as keys.
'''
import pandas as pd
revenue = pd.DataFrame({
'branch_id': [10, 20, 30, 47],
'city': ['Austin', 'Denver', 'Springfield', 'Mendocino'],
'revenue': [100, 83, 4, 200],
'state': ['TX', 'CO', 'IL', 'CA']
})
managers = pd.DataFrame({
'branch': ['Austin', 'Denver', 'Mendocino', 'Springfield'],
'branch_id': [10, 20, 47, 31],
'manager': ['Charles', 'Joel', 'Brett', 'Sally'],
'state': ['TX', 'CO', 'CA', 'MO']
})
sales = pd.DataFrame({
'city': [ 'Mendocino', 'Denver', 'Austin', 'Springfield', 'Springfield'],
'state': ['CA', 'CO', 'TX', 'MO', 'IL'],
'units': [1, 4, 2, 5, 1]
})
revenue_and_sales = pd.merge(revenue, sales, how='right', on=['city','state'])
sales_and_managers = pd.merge(sales, managers, how='left', left_on=['city','state'], right_on=['branch','state'])
'''
INSTRUCTIONS
* Merge sales_and_managers with revenue_and_sales. Store the result as merge_default.
* Print merge_default. This has been done for you.
* Merge sales_and_managers with revenue_and_sales using how='outer'. Store the result as merge_outer.
* Print merge_outer. This has been done for you.
* Merge sales_and_managers with revenue_and_sales only on ['city','state'] using an outer join. Store the result as merge_outer_on and hit 'Submit Answer' to see what the merged DataFrames look like!
'''
# Perform the first merge: merge_default
merge_default = pd.merge(sales_and_managers, revenue_and_sales)
# Print merge_default
print(merge_default)
# Perform the second merge: merge_outer
merge_outer = pd.merge(sales_and_managers, revenue_and_sales, how='outer')
# Print merge_outer
print(merge_outer)
# Perform the third merge: merge_outer_on
merge_outer_on = pd.merge(sales_and_managers, revenue_and_sales, on=['city','state'], how='outer')
# Print merge_outer_on
print(merge_outer_on)
'''
> revenue_and_sales
branch_id city revenue state units
0 10.0 Austin 100.0 TX 2
1 20.0 Denver 83.0 CO 4
2 30.0 Springfield 4.0 IL 1
3 47.0 Mendocino 200.0 CA 1
4 NaN Springfield NaN MO 5
> sales_and_managers
city state units branch branch_id manager
0 Mendocino CA 1 Mendocino 47.0 Brett
1 Denver CO 4 Denver 20.0 Joel
2 Austin TX 2 Austin 10.0 Charles
3 Springfield MO 5 Springfield 31.0 Sally
4 Springfield IL 1 NaN NaN NaN
> merge_default
city state units branch branch_id manager revenue
0 Mendocino CA 1 Mendocino 47.0 Brett 200.0
1 Denver CO 4 Denver 20.0 Joel 83.0
2 Austin TX 2 Austin 10.0 Charles 100.0
> merge_outer
city state units branch branch_id manager revenue
0 Mendocino CA 1 Mendocino 47.0 Brett 200.0
1 Denver CO 4 Denver 20.0 Joel 83.0
2 Austin TX 2 Austin 10.0 Charles 100.0
3 Springfield MO 5 Springfield 31.0 Sally NaN
4 Springfield IL 1 NaN NaN NaN NaN
5 Springfield IL 1 NaN 30.0 NaN 4.0
6 Springfield MO 5 NaN NaN NaN NaN
> merge_outer_on
city state units_x branch branch_id_x manager branch_id_y revenue units_y
0 Mendocino CA 1 Mendocino 47.0 Brett 47.0 200.0 1
1 Denver CO 4 Denver 20.0 Joel 20.0 83.0 4
2 Austin TX 2 Austin 10.0 Charles 10.0 100.0 2
3 Springfield MO 5 Springfield 31.0 Sally NaN NaN 5
4 Springfield IL 1 NaN NaN NaN 30.0 4.0 1
''' | [
"[email protected]"
] | |
865583beca3fb707cdb0ca6e0bf1a2c5ad855011 | 5577a04c006e73b8a40f68055b2173ffe34ce83e | /htsint/version.py | 4cd84d84873f49809c25cbb132ee84a7af132708 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | changanla/htsint | 1617c56bd5f02ab01e0de80d3d06d2d75983a376 | a343aff9b833979b4f5d4ba6d16fc2b65d8ccfc1 | refs/heads/master | 2020-03-16T13:10:15.082839 | 2017-05-24T21:27:27 | 2017-05-24T21:27:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | MAJOR = 0
MINOR = 5
MICRO = 2
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
| [
"[email protected]"
] | |
db3f25723dd795bcab80fc1f8b11a28cb554856d | 74d0235c4eed1e4bc57dd906d2b3958cb48b9dba | /test/package/test_directory_reader.py | 576a7f0c064cd5611c6407557e00686670650e4a | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | anjali411/pytorch | a31ecf84fe892f19452b1063f2b1de1f88d84bb0 | 51b67f2bca3014aa5e7f675237543b8f82743032 | refs/heads/master | 2022-07-22T16:58:56.800837 | 2021-10-14T17:22:15 | 2021-10-14T17:23:55 | 208,863,312 | 1 | 0 | NOASSERTION | 2020-05-14T06:54:25 | 2019-09-16T17:56:13 | C++ | UTF-8 | Python | false | false | 10,506 | py | # -*- coding: utf-8 -*-
import os
import zipfile
from sys import version_info
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import (
run_tests,
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
)
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
from pathlib import Path
packaging_directory = Path(__file__).parent
@skipIf(
IS_FBCODE or IS_SANDCASTLE or IS_WINDOWS,
"Tests that use temporary files are disabled in fbcode",
)
class DirectoryReaderTest(PackageTestCase):
"""Tests use of DirectoryReader as accessor for opened packages."""
@skipIfNoTorchVision
def test_loading_pickle(self):
"""
Test basic saving and loading of modules and pickles from a DirectoryReader.
"""
resnet = resnet18()
filename = self.temp()
with PackageExporter(filename) as e:
e.intern("**")
e.save_pickle("model", "model.pkl", resnet)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = importer.load_pickle("model", "model.pkl")
input = torch.rand(1, 3, 224, 224)
self.assertEqual(dir_mod(input), resnet(input))
def test_loading_module(self):
"""
Test basic saving and loading of a packages from a DirectoryReader.
"""
import package_a
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = dir_importer.import_module("package_a")
self.assertEqual(dir_mod.result, package_a.result)
def test_loading_has_record(self):
"""
Test DirectoryReader's has_record().
"""
import package_a # noqa: F401
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
self.assertTrue(dir_importer.zip_reader.has_record("package_a/__init__.py"))
self.assertFalse(dir_importer.zip_reader.has_record("package_a"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_resource_reader(self):
"""Tests DirectoryReader as the base for get_resource_reader."""
filename = self.temp()
with PackageExporter(filename) as pe:
# Layout looks like:
# package
# ├── one/
# │ ├── a.txt
# │ ├── b.txt
# │ ├── c.txt
# │ └── three/
# │ ├── d.txt
# │ └── e.txt
# └── two/
# ├── f.txt
# └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
pe.save_text("one.three", "d.txt", "hello, d!")
pe.save_text("one.three", "e.txt", "hello, e!")
pe.save_text("two", "f.txt", "hello, f!")
pe.save_text("two", "g.txt", "hello, g!")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
importer = PackageImporter(Path(temp_dir) / Path(filename).name)
reader_one = importer.get_resource_reader("one")
# Different behavior from still zipped archives
resource_path = os.path.join(
Path(temp_dir), Path(filename).name, "one", "a.txt"
)
self.assertEqual(reader_one.resource_path("a.txt"), resource_path)
self.assertTrue(reader_one.is_resource("a.txt"))
self.assertEqual(
reader_one.open_resource("a.txt").getbuffer(), b"hello, a!"
)
self.assertFalse(reader_one.is_resource("three"))
reader_one_contents = list(reader_one.contents())
reader_one_contents.sort()
self.assertSequenceEqual(
reader_one_contents, ["a.txt", "b.txt", "c.txt", "three"]
)
reader_two = importer.get_resource_reader("two")
self.assertTrue(reader_two.is_resource("f.txt"))
self.assertEqual(
reader_two.open_resource("f.txt").getbuffer(), b"hello, f!"
)
reader_two_contents = list(reader_two.contents())
reader_two_contents.sort()
self.assertSequenceEqual(reader_two_contents, ["f.txt", "g.txt"])
reader_one_three = importer.get_resource_reader("one.three")
self.assertTrue(reader_one_three.is_resource("d.txt"))
self.assertEqual(
reader_one_three.open_resource("d.txt").getbuffer(), b"hello, d!"
)
reader_one_three_contents = list(reader_one_three.contents())
reader_one_three_contents.sort()
self.assertSequenceEqual(reader_one_three_contents, ["d.txt", "e.txt"])
self.assertIsNone(importer.get_resource_reader("nonexistent_package"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_package_resource_access(self):
"""Packaged modules should be able to use the importlib.resources API to access
resources saved in the package.
"""
mod_src = dedent(
"""\
import importlib.resources
import my_cool_resources
def secret_message():
return importlib.resources.read_text(my_cool_resources, 'sekrit.txt')
"""
)
filename = self.temp()
with PackageExporter(filename) as pe:
pe.save_source_string("foo.bar", mod_src)
pe.save_text("my_cool_resources", "sekrit.txt", "my sekrit plays")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
self.assertEqual(
dir_importer.import_module("foo.bar").secret_message(),
"my sekrit plays",
)
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_importer_access(self):
filename = self.temp()
with PackageExporter(filename) as he:
he.save_text("main", "main", "my string")
he.save_binary("main", "main_binary", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib
import torch_package_importer as resources
t = resources.load_text('main', 'main')
b = resources.load_binary('main', 'main_binary')
"""
)
he.save_source_string("main", src, is_package=True)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
m = dir_importer.import_module("main")
self.assertEqual(m.t, "my string")
self.assertEqual(m.b, "my string".encode("utf-8"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_resource_access_by_path(self):
"""
Tests that packaged code can used importlib.resources.path.
"""
filename = self.temp()
with PackageExporter(filename) as e:
e.save_binary("string_module", "my_string", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib.resources
import string_module
with importlib.resources.path(string_module, 'my_string') as path:
with open(path, mode='r', encoding='utf-8') as f:
s = f.read()
"""
)
e.save_source_string("main", src, is_package=True)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
m = dir_importer.import_module("main")
self.assertEqual(m.s, "my string")
def test_scriptobject_failure_message(self):
"""
Test basic saving and loading of a ScriptModule in a directory.
Currently not supported.
"""
from package_a.test_module import ModWithTensor
scripted_mod = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
filename = self.temp()
with PackageExporter(filename) as e:
e.save_pickle("res", "mod.pkl", scripted_mod)
zip_file = zipfile.ZipFile(filename, "r")
with self.assertRaisesRegex(
RuntimeError,
"Loading ScriptObjects from a PackageImporter created from a "
"directory is not supported. Use a package archive file instead.",
):
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = dir_importer.load_pickle("res", "mod.pkl")
if __name__ == "__main__":
run_tests()
| [
"[email protected]"
] | |
667999762976f5897af604a84543897c3dfe2b68 | 568d7d17d09adeeffe54a1864cd896b13988960c | /month03.2/django/day05/mysitel3/otm/migrations/0001_initial.py | b086e0cf6fc07d1aa13bbaa6ef12c79c1ce841bb | [
"Apache-2.0"
] | permissive | Amiao-miao/all-codes | e2d1971dfd4cecaaa291ddf710999f2fc4d8995f | ec50036d42d40086cac5fddf6baf4de18ac91e55 | refs/heads/main | 2023-02-24T10:36:27.414153 | 2021-02-01T10:51:55 | 2021-02-01T10:51:55 | 334,908,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # Generated by Django 2.2.12 on 2021-01-13 07:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True, verbose_name='出版社名')),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='书名')),
('publisher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='otm.Publisher')),
],
),
]
| [
"[email protected]"
] | |
89bd7df5f7a432880d48493b2c552aee2bc579cf | a19275ff09caf880e135bce76dc7a0107ec0369e | /catkin_ws/src/robot_python/nodes/int_adp_imp_gazebo_node.py | 507b24e5a419629eee10af64ead79937980c61a4 | [] | no_license | xtyzhen/Multi_arm_robot | e201c898a86406c1b1deb82326bb2157d5b28975 | 15daf1a80c781c1c929ba063d779c0928a24b117 | refs/heads/master | 2023-03-21T14:00:24.128957 | 2021-03-10T12:04:36 | 2021-03-10T12:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
# This script runs the upper-level integral adaptive impedance control simulation
# Author: 陈永厅
# Copyright: Harbin Institute of Technology
# Date: first draft 2020.1.11
import rospy
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from geometry_msgs.msg import WrenchStamped
import threading
import time
import numpy as np
# Custom modules from this package
from robot_python import ImpedanceControl as imc
from robot_python import integral_adaption_impedance_controller as ia_imc
## Global variables
F = np.zeros(6)
qq = np.zeros(7)
## Joint-angle subscriber callback
def joint_callback(msg):
    global qq
for i in range(7):
qq[i] = msg.position[i]
## Six-axis force/torque sensor callback
def force_callback(msg):
global F
Fe = np.zeros(6)
Fe[0] = msg.wrench.force.x
Fe[1] = msg.wrench.force.y
Fe[2] = msg.wrench.force.z
Fe[3] = msg.wrench.torque.x
Fe[4] = msg.wrench.torque.y
Fe[5] = msg.wrench.torque.z
    # transform the force into the base frame and update the latest force value
F = imc.force_end_to_base(qq, Fe)
## Spin thread so the subscriber callbacks keep running
def thread_spin():
rospy.spin()
## Impedance-control node main loop
def node_cloop(pub):
    ## control parameters
    global F  # latest end-effector force in the base frame (global)
    global qq  # current joint angles (global)
    Ex = np.zeros([3, 6])  # position corrections at the last 3 time steps
    Ef = np.zeros([4, 6])  # end-effector force errors at the last 4 time steps
    T = 0.01  # control period (s)
    Fd = np.zeros(6)
    [Xd, qq_init] = ia_imc.get_init_expect_pos()  # desired end-effector pose and initial joint guess; could be moved into the loop
    qq_guess = np.copy(qq_init)
    [Md, Bd, Kd, Ki] = ia_imc.get_imc_parameter()  # could be moved into the loop to change the parameters online
rate = rospy.Rate(100)
while not rospy.is_shutdown():
        # update the error between the measured and desired end-effector force
        Ef[0, :] = Ef[1, :]
        Ef[1, :] = Ef[2, :]
        Ef[2, :] = Ef[3, :]
        Ef[3, :] = F - Fd
        # integral adaptive impedance law computes the position correction in real time
        ia_imc.get_current_error(Md, Bd, Kd, Ki, T, Ef, Ex)  # modifies Ex in place (passed by reference)
        print "Current end-effector force: %s" % F
        print "Current joint angles: %s" % qq
        print "Computed correction: %s" % Ex[2, :]
qr = imc.get_current_joint(Xd, Ex[2, :], qq_guess)
command_data = Float64MultiArray()
command_data.data = qr
print "当前command关节角:%s" % qr
pub.publish(command_data)
rate.sleep()
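# Background sketch (general form, not taken from ia_imc itself): a discrete integral-adaptive
# impedance law is typically of the form
#     Md*dde + Bd*de + Kd*e = Ef + Ki*sum(Ef)
# where e is the position correction, Ef the force-tracking error and Ki the integral
# adaptation gain; ia_imc.get_current_error() is assumed to implement a difference-equation
# version of this update over the stored samples in Ef and Ex.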
def main():
rospy.init_node('ImpendanceController_node')
rospy.Subscriber('/robot1/joint_states', JointState, joint_callback)
rospy.Subscriber('/robot1/ft_sensor_topic', WrenchStamped, force_callback)
pub = rospy.Publisher('/robot1/armc_position_controller/command', Float64MultiArray, queue_size=10)
    # create the spin thread for the subscribers
    t1 = threading.Thread(target=thread_spin)  # spin thread for the subscriber callbacks
    print "Impedance controller begins running!"
t1.start()
node_cloop(pub)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1a12a25abec059bc393ea4132c9fce703a963a74 | d6ea8bd65e7fffc12575737817083d6456eec828 | /vigenere.py | 2bb25639d7d300467bcedb4607ff5a9a22628d91 | [] | no_license | informatiquecsud/cs-simply-utils | 3888ce47d77e2732044efe555a66c87c25d100e7 | 4c6d82897fbd96f3718f81920a324c379988e403 | refs/heads/master | 2022-10-05T02:27:25.209972 | 2022-09-16T07:53:39 | 2022-09-16T07:53:39 | 217,407,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | from utils import alphabet, shift_char
class VigenereCypher:
def __init__(self, key):
self.key = [0] * len(key)
alpha = alphabet()
for i, el in enumerate(key):
if isinstance(el, int):
self.key[i] = el
elif isinstance(el, str):
if el in '0123456789':
self.key[i] = int(el)
elif el == '_':
self.key[i] = el
elif el in alpha:
self.key[i] = ord(el.upper()) - ord('A')
# try:
# int(key)
# self.key = [int(c) for c in key]
# except:
# self.key = [ord(c) - ord('A') for c in key]
def encrypt(self, message, key=None):
key = key or self.key
N = len(key)
result = [''] * len(message)
for i, char in enumerate(message):
result[i] = shift_char(char, key[i % N]) if key[i %
N] != '_' else key[i % N]
return ''.join(result)
def decrypt(self, crypted):
return self.encrypt(crypted, key=[-k if isinstance(k, int) else k for k in self.key])
if __name__ == "__main__":
c = VigenereCypher(key='BC')
assert c.key == [1, 2]
c = VigenereCypher(key='26')
assert c.key == [2, 6]
crypted = c.encrypt('LINFORMATIQUESIMPLEMENT')
print(crypted)
print(c.decrypt('NOPLQXOGVOSAGYKSRRGSGTV'))
crypted = 'OGOPFFJGDNFXFTTVFPHGIGJOTEITJHUGOFFTHGTEIKDJUG'
# for i in range(1, 26):
for i in [1]:
for j in range(1, 26):
c = VigenereCypher(key=[i, j])
print(c.decrypt(crypted))
print(i, j)
message = 'NOMMEZLESCODESSECRETSLESPLUSCELEBRESDELHISTOIRE'
c = VigenereCypher(key=[1, 2])
print(c.encrypt(message))
| [
"[email protected]"
] | |
b81111166f9a39b8a29e524ea7739667bfcca158 | 350db570521d3fc43f07df645addb9d6e648c17e | /0355_Design_Twitter/solution.py | 8d3edca5abd4d8fcb391e591c9019e0f9172a4af | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | '''
355. Design Twitter
Design a simplified version of Twitter where users can post tweets, follow/unfollow another user and are able
to see the 10 most recent tweets in the user's news feed. Your design should support the following methods:
postTweet(userId, tweetId): Compose a new tweet.
getNewsFeed(userId): Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
follow(followerId, followeeId): Follower follows a followee.
unfollow(followerId, followeeId): Follower unfollows a followee.
'''
import collections
# https://www.hrwhisper.me/leetcode-design-twitter/
class Twitter(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.tweets_cnt = 0
self.tweets = collections.defaultdict(list)
self.follower_ship = collections.defaultdict(set)
def postTweet(self, userId, tweetId):
"""
Compose a new tweet.
:type userId: int
:type tweetId: int
:rtype: void
"""
self.tweets[userId].append([tweetId, self.tweets_cnt])
self.tweets_cnt += 1
def getNewsFeed(self, userId):
"""
Retrieve the 10 most recent tweet ids in the user's news feed.
Each item in the news feed must be posted by users who the user followed or by the user herself.
Tweets must be ordered from most recent to least recent.
:type userId: int
:rtype: List[int]
"""
recent_tweets = []
user_list = list(self.follower_ship[userId]) + [userId]
userId_tweet_index = [[userId, len(self.tweets[userId]) - 1] for userId in user_list if userId in self.tweets]
for _ in xrange(10):
max_index = max_tweet_id = max_user_id = -1
for i, (user_id, tweet_index) in enumerate(userId_tweet_index):
if tweet_index >= 0:
tweet_info = self.tweets[user_id][tweet_index]
if tweet_info[1] > max_tweet_id:
max_index, max_tweet_id, max_user_id = i, tweet_info[1], user_id
if max_index < 0: break
recent_tweets.append(self.tweets[max_user_id][userId_tweet_index[max_index][1]][0])
userId_tweet_index[max_index][1] -= 1
return recent_tweets
def follow(self, followerId, followeeId):
"""
Follower follows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
if followerId != followeeId:
self.follower_ship[followerId].add(followeeId)
def unfollow(self, followerId, followeeId):
"""
Follower unfollows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
if followerId in self.follower_ship and followeeId in self.follower_ship[followerId]:
self.follower_ship[followerId].remove(followeeId)
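# Usage sketch (illustration only):
#   twitter = Twitter()
#   twitter.postTweet(1, 5)
#   twitter.getNewsFeed(1)   # -> [5]
#   twitter.follow(1, 2)
#   twitter.postTweet(2, 6)
#   twitter.getNewsFeed(1)   # -> [6, 5]
#   twitter.unfollow(1, 2)
#   twitter.getNewsFeed(1)   # -> [5]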
| [
"[email protected]"
] | |
95612479cdfabbcffd276a39d0b4137bbd5a1c0d | 62ef47b5e63042c46f2a5dd360bfb3dc65cc611f | /geomdl/vis.py | 5bf49d4e15e5a13a3f41e9ef6283fa2b9c3ceca2 | [
"MIT",
"Python-2.0"
] | permissive | Mopolino8/NURBS-Python | 3abdd7266312779cc1e860608c304b5703420ace | 009089b27b2a8308b1834ba41b429471346b2654 | refs/heads/master | 2020-04-17T22:29:48.418346 | 2019-01-18T17:09:19 | 2019-01-18T17:09:19 | 166,996,473 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,589 | py | """
.. module:: vis
:platform: Unix, Windows
:synopsis: Provides abstract base classes for visualization modules
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
import abc
import six
from typing import Any, Sequence, List
# Initialize an empty __all__ for controlling imports
__all__ = []
@six.add_metaclass(abc.ABCMeta)
class VisConfigAbstract(object):
""" Abstract base class for user configuration of the visualization module
Defines an abstract base for NURBS-Python visualization configuration.
"""
def __init__(self, **kwargs):
# type: (**Any) -> None
pass
@six.add_metaclass(abc.ABCMeta)
class VisAbstract(object):
""" Abstract base class for visualization
Defines an abstract base for NURBS-Python visualization modules.
:param config: configuration class
:type config: VisConfigAbstract
"""
def __init__(self, config):
# type: (VisConfigAbstract()) -> None
if not isinstance(config, VisConfigAbstract):
raise TypeError("Config variable must be an instance of vis.VisAbstractConfig")
self._user_config = config
self._module_config = {'ctrlpts': 'points', 'evalpts': 'points', 'others': None}
self._plots = [] # type: List[dict]
self._ctrlpts_offset = 0.0
def clear(self):
# type: () -> None
""" Clears the points, colors and names lists. """
self._plots[:] = []
def add(self, ptsarr, plot_type, name="", color=""):
# type: (Sequence[Sequence[float]], str, str, str) -> None
""" Adds points sets to the visualization instance for plotting.
:param ptsarr: control or evaluated points
:type ptsarr: list, tuple
:param plot_type: type of the plot, e.g. ctrlpts, evalpts, bbox, etc.
:type plot_type: str
:param name: name of the plot displayed on the legend
:type name: str
:param color: plot color
:type color: str
"""
# ptsarr can be a list, a tuple or an array
if ptsarr is None or len(ptsarr) == 0:
return
# Add points, size, plot color and name on the legend
elem = {'ptsarr': ptsarr, 'name': name, 'color': color, 'type': plot_type}
self._plots.append(elem)
@property
def vconf(self):
# type: () -> VisConfigAbstract()
""" User configuration class for visualization
:getter: Gets the user configuration class
:type: vis.VisConfigAbstract
"""
return self._user_config
@property
def mconf(self):
# type: () -> dict
""" Visualization module internal configuration directives
This property controls the internal configuration of the visualization module. It is for advanced use and
testing only.
The visualization module is mainly designed to plot the control points (*ctrlpts*) and the surface points
(*evalpts*). These are called as *plot types*. However, there is more than one way to plot the control points
and the surface points. For instance, a control points plot can be a scatter plot or a quad mesh, and a
surface points plot can be a scatter plot or a tessellated surface plot.
This function allows you to change the type of the plot, e.g. from scatter plot to tessellated surface plot.
        On the other hand, some visualization modules also define specialized classes for this purpose, as it might
        not be possible to change the type of the plot at runtime due to visualization library internal API
        differences (i.e. different backends for 2- and 3-dimensional plots).
By default, the following plot types and values are available:
Curve:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points
Surface:
* For control points (*ctrlpts*): points, quads, quadmesh
* For evaluated points (*evalpts*): points, quads, triangles
Volume:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points, voxels
:getter: Gets the visualization module configuration
:setter: Sets the visualization module configuration
"""
return self._module_config
@mconf.setter
def mconf(self, value):
# type: (Sequence[str]) -> None
if not isinstance(value[0], str) or not isinstance(value[1], str):
raise TypeError("Plot type and its value should be string type")
if value[0] not in self._module_config.keys():
raise KeyError(value[0] + " is not a configuration directive. Possible directives: " +
", ".join([k for k in self._module_config.keys()]))
self._module_config[value[0]] = value[1]
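    # Illustrative usage of the ``mconf`` directive described in the property docstring above.
    # ``vis_comp`` is a hypothetical instance of a concrete VisAbstract subclass (not part of this module):
    #
    #     vis_comp.mconf = ('ctrlpts', 'quadmesh')
    #     vis_comp.mconf = ('evalpts', 'triangles')
    #
    # Each assignment passes a (directive, value) pair of strings, which is exactly what the setter validates.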
@property
def ctrlpts_offset(self):
# type: () -> float
""" Defines an offset value for the control points grid plots
Only makes sense to use with surfaces with dense control points grid.
:getter: Gets the offset value
:setter: Sets the offset value
:type: float
"""
return self._ctrlpts_offset
@ctrlpts_offset.setter
def ctrlpts_offset(self, offset_value):
# type: (float) -> None
self._ctrlpts_offset = float(offset_value)
def size(self, plot_type):
# type: (str) -> int
""" Returns the number of plots defined by the plot type.
:param plot_type: plot type
:type plot_type: str
:return: number of plots defined by the plot type
:rtype: int
"""
count = 0
for plot in self._plots:
if plot['type'] == plot_type:
count += 1
return count
def animate(self, **kwargs):
# type: (**Any) -> None
""" Generates animated plots (if supported).
If the implemented visualization module supports animations, this function will create an animated figure.
Otherwise, it will call :py:meth:`render` method by default.
"""
# Call render() by default
self.render(**kwargs)
@abc.abstractmethod
def render(self, **kwargs):
# type: (**Any) -> None
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstract`` class.
"""
# We need something to plot
if self._plots is None or len(self._plots) == 0:
raise ValueError("Nothing to plot")
| [
"[email protected]"
] | |
81c2bbd50fbc15ba6b4e23ef213f426d6fd669ee | 1eaa6c2500868d0c60b5b2cd552cd671b635de32 | /Algorithm/sword of offer/14.链表中倒数第k个结点.py | cb3067b70c193a8dddb22fc816f0869477255b87 | [] | no_license | jiangyuwei666/my-study-demo | f85f14a599c328addb5af09078d404f1139e0a82 | 9e2baef2f36f071f8903768adb8d5a5a8c1123f6 | refs/heads/master | 2022-04-30T16:47:24.715570 | 2022-03-24T09:08:43 | 2022-03-24T09:08:43 | 152,565,041 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | """
两个指针
首先两个指针都在头部,然后让第一个指针移动k-1个位置,此时第一个指针指向+k个位置的那个节点。
这个时候再同时移动两个节点当第一个指针移动到最后一个节点是时,第二个指针就指向了-k位置的节点。
"""
class ListNode:
def __init__(self, x):
self.x = x
self.next = None
def init_list(num_list):
node_list = []
for i in num_list:
node = ListNode(i)
node_list.append(node)
for j in range(len(node_list)):
if j == len(node_list) - 1:
return node_list[0]
node_list[j].next = node_list[j + 1]
def print_list(head):
result = []
while head:
result.append(head.x)
head = head.next
return result
def get_value(head, k):
"""
    :param head: head node of the linked list
    :param k: position counted from the end of the list
    :return: the value of the k-th node from the end
"""
    if k <= 0:
        return "k must be a positive integer"
first = head
second = head
    for i in range(k - 1):
        if not first:
            return "k is longer than the linked list"
        first = first.next # first now points at the k-th node from the head
    if not first:
        # guard the case k == length + 1, which would otherwise crash on first.next below
        return "k is longer than the linked list"
    while first.next:
        first = first.next
        second = second.next
    return second.x
head = init_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print(get_value(head, 3))
print(get_value(head, -1))
print(get_value(head, 123))
print(get_value(head, 10))
| [
"[email protected]"
] | |
b63b7c3218e2814ed3fcd990e0e23b066dfd88dd | ecf77933549cb56ebde35df35556accc9684424d | /html_to_css.py | 1ddd48e7590e6bf5d37ee74ba9c3c1f7474dce08 | [] | no_license | jayd2446/cuda_html_ops | 3ba102fb87ba9f591fb9cd4ccad7dbc8fab53bf4 | 0fea8430aa057feafac167adbc18f08c647ef099 | refs/heads/master | 2020-05-31T12:18:38.938276 | 2018-08-31T09:35:20 | 2018-08-31T09:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import re
from cudatext import *
REGEX1 = r'\bclass\s*=\s*"(.+?)"'
REGEX2 = r"\bclass\s*=\s*'(.+?)'"
def do_html_to_css_clipboard(compact):
text = ed.get_text_sel()
if not text: return
res = re.findall(REGEX1, text, 0) + re.findall(REGEX2, text, 0)
res = sorted(list(set(res)))
if not res:
msg_status('No CSS classes found')
app_proc(PROC_SET_CLIP, '')
return
text_in = ' ' if compact else '\n\n'
out = ['.'+name+' {'+text_in+'}\n' for name in res]
text = '\n'.join(out)+'\n'
app_proc(PROC_SET_CLIP, text)
msg_status('CSS styles ({}) copied to clipboard'.format(len(res)) )
| [
"[email protected]"
] | |
934c03e88433588a4a2cb7d674fb33c1b3da2a36 | d50bb3387316a4f1a06fe4c84714568a73b2a125 | /tasks/utils.py | b47f431f31a462efa50b11ae15be1623ac038375 | [] | no_license | pmav99/fastapi_docker_compose | 8830006876db35d43614a38945581f9868aa31fc | 491ad3e31fc9ef1ac4306624ba27c945733ac103 | refs/heads/master | 2022-12-09T16:33:19.924385 | 2020-07-30T20:22:01 | 2020-07-30T20:22:01 | 249,724,494 | 1 | 0 | null | 2022-12-08T03:52:43 | 2020-03-24T14:06:22 | Dockerfile | UTF-8 | Python | false | false | 209 | py | import contextlib
import os
import pathlib
@contextlib.contextmanager
def chdir(dirname: str):
curdir = os.getcwd()
try:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
| [
"[email protected]"
] | |
1ab6d8949c8d7742e9cf369e05fdc49f447c63d2 | 60096eba428275a28ab53d364aef0b9bc29e71c8 | /hris/api/users.py | 712b95f287ba42e8411e092d49989f3123ba0dca | [] | no_license | RobusGauli/hris_new | 30ef8d17aceceb5f6c8f69f65df508228cb31f33 | 634f18d162310df9331543f7a877cac619ee1622 | refs/heads/master | 2021-01-19T21:55:39.279378 | 2017-04-29T04:32:38 | 2017-04-29T04:32:38 | 88,724,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,091 | py | from hris.utils import hash_password, gen_access_token, decode_access_token
from flask import request, abort, jsonify, g
from functools import wraps
from hris.api import api
from sqlalchemy.exc import IntegrityError #foreign key violation #this won't come up oftern
from sqlalchemy.orm.exc import NoResultFound
from hris import db_session
#auth
###
from hris.models import (
User,
CompanyDetail,
Employee
)
from hris.api.response_envelop import (
records_json_envelop,
record_exists_envelop,
record_json_envelop,
record_created_envelop,
record_notfound_envelop,
record_updated_envelop,
record_not_updated_env,
fatal_error_envelop,
missing_keys_envelop,
length_require_envelop
)
from hris.api.auth import (
allow_permission,
create_update_permission,
read_permission
)
@api.route('/users', methods=['POST'])
def register_user():
    '''This view registers a user by generating an access token with the given role'''
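    # Illustrative request shapes inferred from the key checks below (the field values are made up):
    #   POST /users?action=register  with JSON {"user_name": "jdoe_01", "password": "s3cret", "role_id": 2}
    #   POST /users?action=login     with JSON {"user_name": "jdoe_01", "password": "s3cret"}
    #   POST /users?action=registeruserforemployee&e_id=7  with the same keys as "register"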
if request.args and request.args['action'] == 'register':
#check if all key existst
if not set(request.json.keys()) == {'user_name', 'password', 'role_id'}:
return jsonify({'message' : 'missing keys'})
#lower case the user_name
if any(len(val.strip()) < 5 for val in request.json.values() if isinstance(val, str)):
return jsonify({'message' : 'Not adequate length of values'})
#lower case the user_name
user_name = request.json['user_name'].strip().lower()
role_id = request.json['role_id']
hashed_pass = hash_password(request.json['password'].encode())
#get the user access_token
user_access_token = gen_access_token(role_id, user_name)
user = User(user_name=user_name, password=hashed_pass, role_id=role_id, access_token=user_access_token.decode('utf-8'))
try:
db_session.add(user)
db_session.commit()
except IntegrityError as ie:
            # handle the error here
return record_exists_envelop()
else:
return jsonify({'message' : 'user_added_successfully', 'access_token' : user_access_token.decode('utf-8')})
elif request.args['action'] == 'login':
if request.json:
if not set(request.json.keys()) == {'user_name', 'password'}:
return jsonify({'message' : 'missing keys'})
else:
            return jsonify({'message': 'request body must be a JSON object'})
user_name = request.json['user_name']
password = request.json['password']
#now hass the password
hashed_pass = hash_password(password)
#get the user from the users for the password and user name
try:
user = db_session.query(User).filter(User.user_name==user_name).one()
if not user:
return record_notfound_envelop('User doesn\'t exists')
#if there is user check for the password
if hashed_pass == user.password:
return record_json_envelop({'access_token' : user.access_token, 'activate' : user.activate, 'role_id' : user.role_id, 'permissions' : user.role.to_dict()})
else:
return record_notfound_envelop('Password doesn\'t match')
except NoResultFound as e:
return record_notfound_envelop('User doesn\'t exists')
###to register the user with the employee
elif request.args['action'] == 'registeruserforemployee':
if not request.args.get('e_id', None):
return 'please send the e_id'
e_id = int(request.args['e_id'])
if not set(request.json.keys()) == {'user_name', 'password', 'role_id'}:
return jsonify({'message' : 'missing keys'})
#lower case the user_name
if any(len(val.strip()) < 5 for val in request.json.values() if isinstance(val, str)):
return jsonify({'message' : 'Not adequate length of values'})
#lower case the user_name
user_name = request.json['user_name'].strip().lower()
role_id = request.json['role_id']
hashed_pass = hash_password(request.json['password'].encode())
#get the user access_token
user_access_token = gen_access_token(role_id, user_name)
user = User(user_name=user_name, password=hashed_pass, role_id=role_id, access_token=user_access_token.decode('utf-8'))
try:
emp = db_session.query(Employee).filter(Employee.id==e_id).one()
db_session.add(user)
emp.user = user
db_session.add(emp)
db_session.commit()
except IntegrityError as ie:
            # handle the error here
return record_exists_envelop()
except NoResultFound as e:
return record_notfound_envelop()
else:
return jsonify({'message' : 'user_added_successfully', 'access_token' : user_access_token.decode('utf-8'), 'status': 'success'})
@api.route('/company', methods=['POST'])
def add_company_detail():
if not set(request.json.keys()) == {'name', 'currency_symbol', 'is_prefix', 'country', 'description'}:
return missing_keys_envelop()
if len(request.json['name']) < 4 or len(request.json['country']) < 3 or len(request.json['currency_symbol']) < 1:
return length_require_envelop()
#now shape up the fields
name = request.json['name'].strip()
currency_symbol = request.json['currency_symbol'].lower().strip()
is_prefix = request.json['is_prefix']
country = request.json['country'].strip()
des = request.json['description'].strip()
company = CompanyDetail(name=name, currency_symbol=currency_symbol, is_prefix=is_prefix, country=country, description=des)
try:
db_session.add(company)
db_session.commit()
except IntegrityError as e:
return record_exists_envelop()
else:
return record_created_envelop(request.json)
@api.route('/users', methods = ['GET'])
@read_permission('read_management_perm')
def get_users():
try:
users = db_session.query(User).filter(User.user_name != 'admin').all()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
return fatal_error_envelop()
else:
return records_json_envelop(list(user.to_dict() for user in users))
@api.route('/users/<int:u_id>', methods=['PUT'])
@create_update_permission('user_management_perm')
def update_user(u_id):
if not request.json:
abort(400)
if not request.args.get('action') == 'update_role':
if 'password' not in request.json.keys():
return missing_keys_envelop()
try:
user = db_session.query(User).filter(User.id==u_id).one()
if user is None:
return record_notfound_envelop()
hashed_pass = hash_password(request.json['password'].encode())
old_hashed_pass = user.password
if old_hashed_pass == hashed_pass:
return jsonify({'message' : 'Please dont\'t use old password', 'status': 'fail'})
else:
user.password = hashed_pass
db_session.add(user)
db_session.commit()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
return fatal_error_envelop()
else:
return record_updated_envelop('Password updated Successfully.')
#update the role
if 'role_id' not in request.json:
return missing_keys_envelop()
try:
user = db_session.query(User).filter(User.id==u_id).one()
if user is None:
return record_notfound_envelop()
user.role_id = int(request.json['role_id'])
db_session.add(user)
db_session.commit()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
        return fatal_error_envelop()
else:
return record_updated_envelop('Role updated successfully.')
| [
"[email protected]"
] | |
6c81044eca74f7f61211a98dc724ffa9c7c7969a | a367a015dbc36287ca933955ded1ee58b5a2a61a | /swagger_client/api/v1fertilizer_to_registration_api.py | 96ecf2b9f7ded845fb1b9fc0b13746f187ed7549 | [] | no_license | kerniee/inno_intership_1_test_task | 70211e153450011c427df595a02e3574dfe7ed9f | fc0619ef54b00806a3b59f3c07c1c1684682d65b | refs/heads/master | 2023-05-23T02:24:40.083723 | 2021-06-21T16:15:04 | 2021-06-21T16:15:04 | 365,855,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,286 | py | # coding: utf-8
"""
Teleagronom
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class V1fertilizerToRegistrationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def fertilizer_to_registration_list(self, **kwargs): # noqa: E501
"""fertilizer_to_registration_list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fertilizer_to_registration_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str distinct: Example: ?distinct
:param str filter: ?filter={\"id\":1}&filter={\"id\":[1,2,9],\"text\":[\"any\",\"text\"]}
:param int page: A page number within the paginated result set.
:param int page_size: Number of results to return per page.
:param str sort: Example: ?sort=[\"id\",\"ASC\"]&sort=[\"name\",\"DESC\"]&sort=[\"title\"] default: DESC
:return: PaginatedFertilizerToRegistrationList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fertilizer_to_registration_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.fertilizer_to_registration_list_with_http_info(**kwargs) # noqa: E501
return data
def fertilizer_to_registration_list_with_http_info(self, **kwargs): # noqa: E501
"""fertilizer_to_registration_list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fertilizer_to_registration_list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str distinct: Example: ?distinct
:param str filter: ?filter={\"id\":1}&filter={\"id\":[1,2,9],\"text\":[\"any\",\"text\"]}
:param int page: A page number within the paginated result set.
:param int page_size: Number of results to return per page.
:param str sort: Example: ?sort=[\"id\",\"ASC\"]&sort=[\"name\",\"DESC\"]&sort=[\"title\"] default: DESC
:return: PaginatedFertilizerToRegistrationList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['distinct', 'filter', 'page', 'page_size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fertilizer_to_registration_list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'distinct' in params:
query_params.append(('distinct', params['distinct'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwtAuth', 'tokenAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/fertilizer/fertilizer_to_registration/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaginatedFertilizerToRegistrationList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def fertilizer_to_registration_retrieve(self, id, **kwargs): # noqa: E501
"""fertilizer_to_registration_retrieve # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fertilizer_to_registration_retrieve(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: A unique integer value identifying this Регистрация удобрения. (required)
:return: FertilizerToRegistration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.fertilizer_to_registration_retrieve_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.fertilizer_to_registration_retrieve_with_http_info(id, **kwargs) # noqa: E501
return data
def fertilizer_to_registration_retrieve_with_http_info(self, id, **kwargs): # noqa: E501
"""fertilizer_to_registration_retrieve # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fertilizer_to_registration_retrieve_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: A unique integer value identifying this Регистрация удобрения. (required)
:return: FertilizerToRegistration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method fertilizer_to_registration_retrieve" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `fertilizer_to_registration_retrieve`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwtAuth', 'tokenAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/fertilizer/fertilizer_to_registration/{id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FertilizerToRegistration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
c2f6f2417db54c1403f0fd7961a24ede9f71b21c | 76b5be6d12c6885c8cb9ae458bf878a3dcf0401c | /DojoAssignments/Python2/PythonAssignments/Django/DjangoIntro/SessionWords/apps/session_words_app/views.py | 539759b21089b070bef198b2336a8eced707120c | [] | no_license | DaseinUXD/CodingDojo | ba1d532750d61a21feb401243c49e05623e9b8c2 | 19b2d0f0ce9f8c9d08747438412e5c988073f385 | refs/heads/master | 2020-03-11T16:36:51.312297 | 2018-09-19T22:32:09 | 2018-09-19T22:32:09 | 130,121,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
# Create your views here.
# Index view
def index(request):
return render(request, 'session_words_app/index.html')
def add(request):
if request.method=="POST":
return redirect('/')
else:
# do something else
return redirect('/')
def clear(request):
return redirect('/') | [
"[email protected]"
] | |
79aa73692fd55784d617c6924ab841e36efee841 | fd97689f062e6d90837ea27b9a5e3de87bcd1e92 | /Servidor/MET-Server-udp.py | c79bc598c86aa8500e06b913a44a9d4c03e14fb1 | [] | no_license | Edresson/MET | 9f7b8a43bdea29ee844d0c98a20f0aef4afbcdd2 | 5945116d0d52fdf8f892a5f266bf6b51afb529eb | refs/heads/master | 2023-08-31T10:18:35.942324 | 2019-10-29T12:17:15 | 2019-10-29T12:17:15 | 93,848,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | # -*- coding: utf-8 -*-
from threading import Thread
from PyQt4 import QtTest
from time import time
import time
from socket import *
import Motor
import celula
serverSocketMotor = socket(AF_INET, SOCK_DGRAM)
serverSocketMotor.bind(('', 12001))
serverSocketCelula = socket(AF_INET, SOCK_DGRAM)
serverSocketCelula.bind(('', 12002))
class thread_motor(Thread):
def run(self):
global freq_botao
inicio= time.time()
while True:
fim = time.time()
if fim - inicio > 6:
Motor.Parar()
message, address = serverSocketMotor.recvfrom(1024)
message = str(message).replace("b'",'')
message = message.replace("'",'')
if message == "1":
inicio = time.time()
if message == "STOP":
print('parar')
serverSocketMotor.sendto(B'1', address)
Motor.Parar()
elif message == "ping":
print('p')
serverSocketMotor.sendto(B'ping', address)
elif message[0:2] =='fb':
print('fb')
string,freqfb = message.split(':')
Motor.freq_botao = float(freqfb)
elif message[0:3] =='SeD':
print('SeD')
serverSocketMotor.sendto(B'1', address)
string,vel,freq,controle,deslocamento = message.split(':')
Motor.Subir_descer(float(vel),float(freq),int(controle),float(deslocamento))
elif message[0:3] =='Cal':
print('cal')
serverSocketMotor.sendto(B'1', address)
cmd,valor,freq = message.split(':')
Motor.calcular(float(valor),float(freq))
elif message =="SUBIR":
print('Subir')
serverSocketMotor.sendto(B'1', address)
Motor.subir()
elif message =="BAIXAR":
print('Baixar')
####print(message)
serverSocketMotor.sendto(B'1', address)
Motor.baixar()
class thread_celula(Thread):
def run(self):
while True:
message, address = serverSocketCelula.recvfrom(1024)
message = str(message).replace("b'",'')
message = message.replace("'",'')
if message == "gv":
val = str(celula.getvalue())
val =val.encode()
serverSocketCelula.sendto(val, address)
elif message == "ping":
                serverSocketCelula.sendto(B'ping', address) # reply on the load-cell socket, not the motor one
elif message =='ca':
val = str(celula.calibrar())
val =val.encode()
serverSocketCelula.sendto(val, address)
elif message[0:3] =='ini':
serverSocketCelula.sendto(B'1', address)
cmd,valor = message.split(':')
celula.iniciarcel(float(valor))
elif message =="tr":
serverSocketCelula.sendto(B'1', address)
celula.tare()
thrCelula=thread_celula()
thrCelula.start()
thrMotor = thread_motor()
thrMotor.start()
| [
"[email protected]"
] | |
54203602ebefeb4d7e49a6c2cd32adf327c9e6e9 | df20dc807d2d9ba666377a2a23cbe80b268c75cd | /0 Python Fundamental/32a5_readCSVasDICT_noCSVpkg.py | 70ff4ebfcfdaa9b57a22e5f3a9a52a19d38883f8 | [] | no_license | stevenwongso/Python_Fundamental_DataScience | e9bb294017d0fcc05e2079f79f880ac8be726e11 | 706c61c8bdfcec1461328fa7a58a55a2d0f9f2d8 | refs/heads/master | 2021-01-04T07:32:11.856979 | 2020-02-13T05:18:06 | 2020-02-13T05:18:06 | 240,449,486 | 0 | 1 | null | 2020-02-14T07:12:16 | 2020-02-14T07:12:15 | null | UTF-8 | Python | false | false | 252 | py | # read csv as dict without csv package
myfile = open('32a4.csv', 'r')
data = []
for i in myfile.readlines()[1:]:
no = int(i.split(';')[0])
nama = i.split(';')[1].replace('\n','')
x = {'no': no, 'nama': nama}
data.append(x)
print(data) | [
"[email protected]"
] | |
86ee833065018990ffb7d10de8b4eae90e0400fa | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/weather3.py | da29540c7ea77779f74cf0c9f89c0af03f26e5b3 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:01477c0dba3b4f14c6e88c20abc1a741e612745ccaadd12514f86251fcb26f5d
size 1382
| [
"[email protected]"
] | |
c2c3b84b358107d0e1be19f8975fcf001efefb02 | 9147a96572715604a31b6c026b7608b6d26276e8 | /cfn_model/model/IAMManagedPolicy.py | 4d24b42f6b0516a2f905011d5e7faab8fef05b51 | [
"MIT"
] | permissive | rubelw/cloudformation-validator | 9890c024174640c79914f2f8bd153dc2900fc078 | 4ba3b05ae3abd3a941aa6a34419c594d8e0d0e5d | refs/heads/master | 2020-03-23T18:31:29.084701 | 2019-01-17T21:55:04 | 2019-01-17T21:55:04 | 141,914,515 | 6 | 1 | NOASSERTION | 2019-01-17T21:55:05 | 2018-07-22T16:58:55 | Python | UTF-8 | Python | false | false | 699 | py | from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class IAMManagedPolicy(ModelElement):
"""
IAM managed policy model
"""
def __init__(self, cfn_model):
"""
Initialize
:param cfn_model:
"""
ModelElement.__init__(self,cfn_model)
self.groups = []
self.roles = []
self.users = []
self.policy_document=None
self.resource_type= 'AWS::IAM::ManagedPolicy'
def policy_document(self, document):
"""
Set the policy document
:param document:
:return:
"""
self.policy_document=document
| [
"[email protected]"
] | |
5591a8edf408ec43904d5a0e73a02795dc193eee | a183a600e666b11331d9bd18bcfe1193ea328f23 | /pdt/core/admin/__init__.py | 58e1d13908fc242d51b379fa54a382780b919b22 | [
"MIT"
] | permissive | AbdulRahmanAlHamali/pdt | abebc9cae04f4afa1fc31b87cbf4b981affdca62 | 5c32aab78e48b5249fd458d9c837596a75698968 | refs/heads/master | 2020-05-15T07:51:09.877614 | 2015-12-01T18:22:56 | 2015-12-01T18:22:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | """PDT core admin interface."""
from .case import CaseAdmin # NOQA
from .case_category import CaseCategoryAdmin # NOQA
from .case_edit import CaseEditAdmin # NOQA
from .ci_project import CIProjectAdmin # NOQA
from .deployment_report import DeploymentReportAdmin # NOQA
from .instance import InstanceAdmin # NOQA
from .migration import MigrationAdmin # NOQA
from .migration_report import MigrationReportAdmin # NOQA
from .release import ReleaseAdmin # NOQA
from .notification import NotificationTemplateAdmin # NOQA
| [
"[email protected]"
] | |
01e5ffbf994198ee773823897db4431035f17668 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-lib-keychain-cfg/nc-create-xr-lib-keychain-cfg-20-ydk.py | 5c5384fcd79c34f4269bfd0585bb229e990d76bd | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 3,690 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-lib-keychain-cfg.
usage: nc-create-xr-lib-keychain-cfg-20-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_lib_keychain_cfg \
as xr_lib_keychain_cfg
import logging
def config_keychains(keychains):
"""Add config data to keychains object."""
keychain = keychains.Keychain()
keychain.chain_name = "CHAIN1"
key = keychain.macsec_keychain.macsec_keys.MacsecKey()
key.key_id = "10"
key.macsec_key_string = key.MacsecKeyString()
key.macsec_key_string.string = "101E584B5643475D5B547B79777C6663754356445055030F0F03055C504C430F0F"
key.macsec_key_string.cryptographic_algorithm = xr_lib_keychain_cfg.MacsecCryptoAlg.aes_128_cmac
key.macsec_key_string.encryption_type = xr_lib_keychain_cfg.MacsecEncryption.type7
key.macsec_lifetime = key.MacsecLifetime()
key.macsec_lifetime.start_hour = 0
key.macsec_lifetime.start_minutes = 0
key.macsec_lifetime.start_seconds = 0
key.macsec_lifetime.start_date = 1
key.macsec_lifetime.start_month = xr_lib_keychain_cfg.KeyChainMonth.jan
key.macsec_lifetime.start_year = 2017
key.macsec_lifetime.infinite_flag = True
keychain.macsec_keychain.macsec_keys.macsec_key.append(key)
keychains.keychain.append(keychain)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
keychains = xr_lib_keychain_cfg.Keychains() # create object
config_keychains(keychains) # add object configuration
# create configuration on NETCONF device
crud.create(provider, keychains)
exit()
# End of script
| [
"[email protected]"
] | |
6cc0e33277b45a17ffd8b11d609e36880c5ac4b5 | 5a281cb78335e06c631181720546f6876005d4e5 | /blazar-3.0.0/blazar/api/v1/utils.py | c6c359945627b31672386b79cba056931827683b | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 9,523 | py | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import flask
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from werkzeug import datastructures
from blazar.api import context
from blazar.db import exceptions as db_exceptions
from blazar import exceptions as ex
from blazar.i18n import _
from blazar.manager import exceptions as manager_exceptions
from blazar.utils.openstack import exceptions as opst_exceptions
LOG = logging.getLogger(__name__)
class Rest(flask.Blueprint):
"""REST helper class."""
def __init__(self, *args, **kwargs):
super(Rest, self).__init__(*args, **kwargs)
self.url_prefix = kwargs.get('url_prefix', None)
self.routes_with_query_support = []
def get(self, rule, status_code=200, query=False):
return self._mroute('GET', rule, status_code, query=query)
def post(self, rule, status_code=201):
return self._mroute('POST', rule, status_code)
def put(self, rule, status_code=200):
return self._mroute('PUT', rule, status_code)
def delete(self, rule, status_code=204):
return self._mroute('DELETE', rule, status_code)
def _mroute(self, methods, rule, status_code=None, **kw):
"""Route helper method."""
if type(methods) is str:
methods = [methods]
return self.route(rule, methods=methods, status_code=status_code, **kw)
def route(self, rule, **options):
"""Routes REST method and its params to the actual request."""
status = options.pop('status_code', None)
file_upload = options.pop('file_upload', False)
query = options.pop('query', False)
def decorator(func):
endpoint = options.pop('endpoint', func.__name__)
def handler(**kwargs):
LOG.debug("Rest.route.decorator.handler, kwargs=%s", kwargs)
_init_resp_type(file_upload)
# update status code
if status:
flask.request.status_code = status
if flask.request.method in ['POST', 'PUT']:
kwargs['data'] = request_data()
if flask.request.endpoint in self.routes_with_query_support:
params = {k: v for k, v in get_request_args().items()}
kwargs['query'] = params
with context.ctx_from_headers(flask.request.headers):
try:
return func(**kwargs)
except ex.BlazarException as e:
return bad_request(e)
except messaging.RemoteError as e:
# Get the exception from manager and common exceptions
cls = getattr(manager_exceptions, e.exc_type,
getattr(ex, e.exc_type, None))
cls = cls or getattr(opst_exceptions, e.exc_type,
getattr(ex, e.exc_type, None))
if cls is not None:
return render_error_message(cls.code, e.value,
cls.code)
else:
# Get the exception from db exceptions and hide
# the message because could contain table/column
# information
cls = getattr(db_exceptions, e.exc_type, None)
if cls is not None:
return render_error_message(
cls.code,
'{0}: A database error occurred'.format(
cls.__name__),
cls.code)
else:
# We obfuscate all Exceptions
# but Blazar ones for
# security reasons
err = 'Internal Server Error'
return internal_error(500, err, e)
except Exception as e:
return internal_error(500, 'Internal Server Error', e)
if query:
self.routes_with_query_support.append(
'.'.join([self.name, endpoint]))
self.add_url_rule(rule, endpoint, handler, **options)
self.add_url_rule(rule + '.json', endpoint, handler, **options)
return func
return decorator
RT_JSON = datastructures.MIMEAccept([("application/json", 1)])
def _init_resp_type(file_upload):
"""Extracts response content type."""
# get content type from Accept header
resp_type = flask.request.accept_mimetypes
# url /foo.json
if flask.request.path.endswith('.json'):
resp_type = RT_JSON
flask.request.resp_type = resp_type
# set file upload flag
flask.request.file_upload = file_upload
def render(result=None, response_type=None, status=None, **kwargs):
"""Render response to return."""
if not result:
result = {}
if type(result) is dict:
result.update(kwargs)
elif kwargs:
# can't merge kwargs into the non-dict res
abort_and_log(500,
_("Non-dict and non-empty kwargs passed to render."))
return
status_code = getattr(flask.request, 'status_code', None)
if status:
status_code = status
if not status_code:
status_code = 200
if not response_type:
response_type = getattr(flask.request, 'resp_type', RT_JSON)
serializer = None
if "application/json" in response_type:
response_type = RT_JSON
serializer = jsonutils
else:
abort_and_log(400,
_("Content type '%s' isn't supported") % response_type)
return
body = serializer.dump_as_bytes(result)
response_type = str(response_type)
return flask.Response(response=body, status=status_code,
mimetype=response_type)
def request_data():
"""Method called to process POST and PUT REST methods."""
if hasattr(flask.request, 'parsed_data'):
return flask.request.parsed_data
if not flask.request.content_length > 0:
LOG.debug("Empty body provided in request")
return dict()
if flask.request.file_upload:
return flask.request.data
deserializer = None
content_type = flask.request.mimetype
if not content_type or content_type in RT_JSON:
deserializer = jsonutils
else:
abort_and_log(400,
_("Content type '%s' isn't supported") % content_type)
return
# parsed request data to avoid unwanted re-parsings
parsed_data = deserializer.loads(flask.request.data)
flask.request.parsed_data = parsed_data
return flask.request.parsed_data
def get_request_args():
return flask.request.args
def abort_and_log(status_code, descr, exc=None):
"""Process occurred errors."""
LOG.error("Request aborted with status code %(code)s and "
"message '%(msg)s'", {'code': status_code, 'msg': descr})
if exc is not None:
LOG.error(traceback.format_exc())
flask.abort(status_code, description=descr)
def render_error_message(error_code, error_message, error_name):
"""Render nice error message."""
message = {
"error_code": error_code,
"error_message": error_message,
"error_name": error_name
}
resp = render(message)
resp.status_code = error_code
return resp
def internal_error(status_code, descr, exc=None):
"""Called if internal error occurred."""
LOG.error("Request aborted with status code %(code)s "
"and message '%(msg)s'", {'code': status_code, 'msg': descr})
if exc is not None:
LOG.error(traceback.format_exc())
error_code = "INTERNAL_SERVER_ERROR"
if status_code == 501:
error_code = "NOT_IMPLEMENTED_ERROR"
return render_error_message(status_code, descr, error_code)
def bad_request(error):
"""Called if Blazar exception occurred."""
if not error.code:
error.code = 400
LOG.debug("Validation Error occurred: error_code=%(code)s, "
"error_message=%(msg)s, error_name=%(name)s",
{'code': error.code, 'msg': str(error), 'name': error.code})
return render_error_message(error.code, str(error), error.code)
def not_found(error):
"""Called if object was not found."""
if not error.code:
error.code = 404
LOG.debug("Not Found exception occurred: error_code=%(code)s, "
"error_message=%(msg)s, error_name=%(name)s",
{'code': error.code, 'msg': str(error), 'name': error.code})
return render_error_message(error.code, str(error), error.code)
| [
"Wayne [email protected]"
] | Wayne [email protected] |
889eb827f803875363cea68f5952d72e22de0ae9 | 3db5eeeb0d34e7f093a9f3d9750c270df4ba3845 | /blog/admin.py | a4138cc33360d508be75461be64727fa3c2f6d3d | [] | no_license | DeepakDarkiee/stackunderflow | b0d052d2b1ef62dbb948a2789abfb80fd097191b | d68161e5729bdb8033f5ae0c28379b1e89c31044 | refs/heads/master | 2022-10-13T10:16:34.104129 | 2020-06-04T07:44:38 | 2020-06-04T07:44:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from django.contrib import admin
from .models import Post, Comment ,Category,Contact
from django_summernote.admin import SummernoteModelAdmin
class PostAdmin(SummernoteModelAdmin):
list_display = ('title', 'slug', 'status', 'category', 'created_on')
list_filter = ('status', 'created_on' ,'category')
search_fields = ['title', 'content']
summernote_fields = ('content')
prepopulated_fields = {'slug': ('title',)}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'body', 'post', 'created_on', 'active')
list_filter = ('active', 'created_on')
search_fields = ('name', 'email', 'body')
actions = ['approve_comments']
def approve_comments(self, request, queryset):
queryset.update(active=True)
class ContactAdmin(admin.ModelAdmin):
list_display = ('name','email','Content', 'created_on')
list_filter = ('email','created_on')
search_fields = ('name', 'email', 'body')
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Contact,ContactAdmin) | [
"[email protected]"
] | |
80ed4fcaa32b7bbe66686eaeffe9a665e2afbf26 | 629a62dc600b356e55b25b21c93d088f5bc8aa64 | /source/webapp/forms.py | 3710a393967147c566da58a87bcfcf5247950ce3 | [] | no_license | Azer-Denker/ex_9 | 70e70408355d602ff11817d6cc53d53c0f1e3b1f | bfb8272ebb1764a9a3b382f2cabb19778d5f5541 | refs/heads/main | 2023-05-04T23:41:22.362929 | 2021-05-29T13:05:06 | 2021-05-29T13:05:06 | 306,867,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django import forms
from webapp.models import Photo, Album
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = ['photo_img', 'signature', 'album', 'status']
class AlbumForm(forms.ModelForm):
class Meta:
model = Album
fields = ['name', 'description']
| [
"[email protected]"
] | |
26e9f1206b06f85917f4e41c797556210b4773d7 | 57dc9f8e67bb396ce6c7a96c699e943825b8cebe | /slowfast/models/video_model_builder.py | 9c529ef68386403c571a84e84958fccd62f5366f | [
"Apache-2.0"
] | permissive | akshaybankapure/SlowFast | 35cd914b9b0b64bf9d26c46a7e416ad30085e8bf | 57daed622e8bb4af7cb6d1d9fed4395bc19dfa50 | refs/heads/master | 2022-07-05T01:04:56.543577 | 2020-05-24T03:00:19 | 2020-05-24T03:00:19 | 266,459,214 | 0 | 0 | Apache-2.0 | 2020-05-24T02:59:30 | 2020-05-24T02:59:29 | null | UTF-8 | Python | false | false | 22,107 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import torch
import torch.nn as nn
import slowfast.utils.weight_init_helper as init_helper
from slowfast.models.batchnorm_helper import get_norm
from . import head_helper, resnet_helper, stem_helper
from .build import MODEL_REGISTRY
# Number of blocks for different stages given the model depth.
_MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
# Basis of temporal kernel sizes for each of the stage.
_TEMPORAL_KERNEL_BASIS = {
"c2d": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"c2d_nopool": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"i3d": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"i3d_nopool": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"slow": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[3]], # res4 temporal kernel.
[[3]], # res5 temporal kernel.
],
"slowfast": [
[[1], [5]], # conv1 temporal kernel for slow and fast pathway.
[[1], [3]], # res2 temporal kernel for slow and fast pathway.
[[1], [3]], # res3 temporal kernel for slow and fast pathway.
[[3], [3]], # res4 temporal kernel for slow and fast pathway.
[[3], [3]], # res5 temporal kernel for slow and fast pathway.
],
}
_POOL1 = {
"c2d": [[2, 1, 1]],
"c2d_nopool": [[1, 1, 1]],
"i3d": [[2, 1, 1]],
"i3d_nopool": [[1, 1, 1]],
"slow": [[1, 1, 1]],
"slowfast": [[1, 1, 1], [1, 1, 1]],
}
class FuseFastToSlow(nn.Module):
"""
Fuses the information from the Fast pathway to the Slow pathway. Given the
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
def __init__(
self,
dim_in,
fusion_conv_channel_ratio,
fusion_kernel,
alpha,
eps=1e-5,
bn_mmt=0.1,
inplace_relu=True,
norm_module=nn.BatchNorm3d,
):
"""
Args:
dim_in (int): the channel dimension of the input.
fusion_conv_channel_ratio (int): channel ratio for the convolution
used to fuse from Fast pathway to Slow pathway.
fusion_kernel (int): kernel size of the convolution used to fuse
from Fast pathway to Slow pathway.
alpha (int): the frame rate ratio between the Fast and Slow pathway.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(FuseFastToSlow, self).__init__()
self.conv_f2s = nn.Conv3d(
dim_in,
dim_in * fusion_conv_channel_ratio,
kernel_size=[fusion_kernel, 1, 1],
stride=[alpha, 1, 1],
padding=[fusion_kernel // 2, 0, 0],
bias=False,
)
self.bn = norm_module(
num_features=dim_in * fusion_conv_channel_ratio,
eps=eps,
momentum=bn_mmt,
)
self.relu = nn.ReLU(inplace_relu)
def forward(self, x):
x_s = x[0]
x_f = x[1]
fuse = self.conv_f2s(x_f)
fuse = self.bn(fuse)
fuse = self.relu(fuse)
x_s_fuse = torch.cat([x_s, fuse], 1)
return [x_s_fuse, x_f]
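# Shape sketch for FuseFastToSlow.forward, derived from the constructor arguments above (B is an assumed batch size):
#   x[0] (slow): B x C_s x T x H x W, x[1] (fast): B x C_f x (T * alpha) x H x W, where dim_in == C_f.
#   conv_f2s maps the fast feature to B x (C_f * fusion_conv_channel_ratio) x T x H x W via the temporal
#   stride of alpha, and the result is concatenated onto the slow feature along the channel dimension,
#   so x_s_fuse carries C_s + C_f * fusion_conv_channel_ratio channels.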
@MODEL_REGISTRY.register()
class SlowFast(nn.Module):
"""
SlowFast model builder for SlowFast network.
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(SlowFast, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = cfg.DETECTION.ENABLE
self.num_pathways = 2
self._construct_network(cfg)
init_helper.init_weights(
self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
)
def _construct_network(self, cfg):
"""
Builds a SlowFast model. The first pathway is the Slow pathway and the
second pathway is the Fast pathway.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
(d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
out_dim_ratio = (
cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
)
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(
dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV],
kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],
stride=[[1, 2, 2]] * 2,
padding=[
[temp_kernel[0][0][0] // 2, 3, 3],
[temp_kernel[0][1][0] // 2, 3, 3],
],
norm_module=self.norm_module,
)
self.s1_fuse = FuseFastToSlow(
width_per_group // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s2 = resnet_helper.ResStage(
dim_in=[
width_per_group + width_per_group // out_dim_ratio,
width_per_group // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 4,
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[1],
stride=cfg.RESNET.SPATIAL_STRIDES[0],
num_blocks=[d2] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
nonlocal_group=cfg.NONLOCAL.GROUP[0],
nonlocal_pool=cfg.NONLOCAL.POOL[0],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
norm_module=self.norm_module,
)
self.s2_fuse = FuseFastToSlow(
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(
kernel_size=pool_size[pathway],
stride=pool_size[pathway],
padding=[0, 0, 0],
)
self.add_module("pathway{}_pool".format(pathway), pool)
self.s3 = resnet_helper.ResStage(
dim_in=[
width_per_group * 4 + width_per_group * 4 // out_dim_ratio,
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 8,
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[2],
stride=cfg.RESNET.SPATIAL_STRIDES[1],
num_blocks=[d3] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
nonlocal_group=cfg.NONLOCAL.GROUP[1],
nonlocal_pool=cfg.NONLOCAL.POOL[1],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
norm_module=self.norm_module,
)
self.s3_fuse = FuseFastToSlow(
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s4 = resnet_helper.ResStage(
dim_in=[
width_per_group * 8 + width_per_group * 8 // out_dim_ratio,
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 16,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[3],
stride=cfg.RESNET.SPATIAL_STRIDES[2],
num_blocks=[d4] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
nonlocal_group=cfg.NONLOCAL.GROUP[2],
nonlocal_pool=cfg.NONLOCAL.POOL[2],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
norm_module=self.norm_module,
)
self.s4_fuse = FuseFastToSlow(
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s5 = resnet_helper.ResStage(
dim_in=[
width_per_group * 16 + width_per_group * 16 // out_dim_ratio,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[4],
stride=cfg.RESNET.SPATIAL_STRIDES[3],
num_blocks=[d5] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
nonlocal_group=cfg.NONLOCAL.GROUP[3],
nonlocal_pool=cfg.NONLOCAL.POOL[3],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
norm_module=self.norm_module,
)
if cfg.DETECTION.ENABLE:
self.head = head_helper.ResNetRoIHead(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
num_classes=cfg.MODEL.NUM_CLASSES,
pool_size=[
[
cfg.DATA.NUM_FRAMES
// cfg.SLOWFAST.ALPHA
// pool_size[0][0],
1,
1,
],
[cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1],
],
resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2,
scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2,
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
aligned=cfg.DETECTION.ALIGNED,
)
else:
self.head = head_helper.ResNetBasicHead(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
num_classes=cfg.MODEL.NUM_CLASSES,
pool_size=[
[
cfg.DATA.NUM_FRAMES
// cfg.SLOWFAST.ALPHA
// pool_size[0][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
],
[
cfg.DATA.NUM_FRAMES // pool_size[1][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[1][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[1][2],
],
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
x = self.s5(x)
if self.enable_detection:
x = self.head(x, bboxes)
else:
x = self.head(x)
return x
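# Input sketch for SlowFast.forward, following the pooling arithmetic in _construct_network (B is an assumed batch size):
#   x is a list of two 5-D tensors: x[0] feeds the Slow pathway with cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA frames
#   and x[1] feeds the Fast pathway with cfg.DATA.NUM_FRAMES frames, both shaped B x C x T x H x W with
#   H = W = cfg.DATA.CROP_SIZE and C given by cfg.DATA.INPUT_CHANNEL_NUM. When cfg.DETECTION.ENABLE is set,
#   ``bboxes`` must also be provided for the RoI head.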
@MODEL_REGISTRY.register()
class ResNet(nn.Module):
"""
ResNet model builder. It builds a ResNet like network backbone without
lateral connection (C2D, I3D, Slow).
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
"Non-local neural networks."
https://arxiv.org/pdf/1711.07971.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(ResNet, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = cfg.DETECTION.ENABLE
self.num_pathways = 1
self._construct_network(cfg)
init_helper.init_weights(
self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
)
def _construct_network(self, cfg):
"""
Builds a single pathway ResNet model.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
(d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(
dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
dim_out=[width_per_group],
kernel=[temp_kernel[0][0] + [7, 7]],
stride=[[1, 2, 2]],
padding=[[temp_kernel[0][0][0] // 2, 3, 3]],
norm_module=self.norm_module,
)
self.s2 = resnet_helper.ResStage(
dim_in=[width_per_group],
dim_out=[width_per_group * 4],
dim_inner=[dim_inner],
temp_kernel_sizes=temp_kernel[1],
stride=cfg.RESNET.SPATIAL_STRIDES[0],
num_blocks=[d2],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
nonlocal_group=cfg.NONLOCAL.GROUP[0],
nonlocal_pool=cfg.NONLOCAL.POOL[0],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
norm_module=self.norm_module,
)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(
kernel_size=pool_size[pathway],
stride=pool_size[pathway],
padding=[0, 0, 0],
)
self.add_module("pathway{}_pool".format(pathway), pool)
self.s3 = resnet_helper.ResStage(
dim_in=[width_per_group * 4],
dim_out=[width_per_group * 8],
dim_inner=[dim_inner * 2],
temp_kernel_sizes=temp_kernel[2],
stride=cfg.RESNET.SPATIAL_STRIDES[1],
num_blocks=[d3],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
nonlocal_group=cfg.NONLOCAL.GROUP[1],
nonlocal_pool=cfg.NONLOCAL.POOL[1],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
norm_module=self.norm_module,
)
self.s4 = resnet_helper.ResStage(
dim_in=[width_per_group * 8],
dim_out=[width_per_group * 16],
dim_inner=[dim_inner * 4],
temp_kernel_sizes=temp_kernel[3],
stride=cfg.RESNET.SPATIAL_STRIDES[2],
num_blocks=[d4],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
nonlocal_group=cfg.NONLOCAL.GROUP[2],
nonlocal_pool=cfg.NONLOCAL.POOL[2],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
norm_module=self.norm_module,
)
self.s5 = resnet_helper.ResStage(
dim_in=[width_per_group * 16],
dim_out=[width_per_group * 32],
dim_inner=[dim_inner * 8],
temp_kernel_sizes=temp_kernel[4],
stride=cfg.RESNET.SPATIAL_STRIDES[3],
num_blocks=[d5],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
nonlocal_group=cfg.NONLOCAL.GROUP[3],
nonlocal_pool=cfg.NONLOCAL.POOL[3],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
norm_module=self.norm_module,
)
if self.enable_detection:
self.head = head_helper.ResNetRoIHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_CLASSES,
pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]],
resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2],
scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
aligned=cfg.DETECTION.ALIGNED,
)
else:
self.head = head_helper.ResNetBasicHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_CLASSES,
pool_size=[
[
cfg.DATA.NUM_FRAMES // pool_size[0][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
]
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s2(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s4(x)
x = self.s5(x)
if self.enable_detection:
x = self.head(x, bboxes)
else:
x = self.head(x)
return x
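
# A hedged usage sketch (not part of this file): it only illustrates the expected
# input format of the single-pathway ResNet above. `cfg` is assumed to be a fully
# populated SlowFast CfgNode; the model takes a list with one tensor per pathway,
# each laid out as (batch, channel, frame, height, width).
#
#     model = ResNet(cfg)
#     x = [torch.rand(2, 3, cfg.DATA.NUM_FRAMES, cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)]
#     preds = model(x)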
| [
"[email protected]"
] | |
cd4a79c68bdb5852d47593b9eae2e82afb13efcf | 01733042e84a768b77f64ec24118d0242b2f13b8 | /uhd_restpy/testplatform/sessions/ixnetwork/topology/ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8.py | ccd8ef58ec5ea22207686188107961420a1e6fb2 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 13,844 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Ospfv3PseudoInterface(Base):
"""Information for Simulated Router Interfaces
The Ospfv3PseudoInterface class encapsulates a list of ospfv3PseudoInterface resources that are managed by the system.
A list of resources can be retrieved from the server using the Ospfv3PseudoInterface.find() method.
"""
__slots__ = ()
_SDM_NAME = 'ospfv3PseudoInterface'
_SDM_ATT_MAP = {
'AdjSID': 'adjSID',
'BFlag': 'bFlag',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableAdjSID': 'enableAdjSID',
'GFlag': 'gFlag',
'LFlag': 'lFlag',
'Metric': 'metric',
'Name': 'name',
'PFlag': 'pFlag',
'VFlag': 'vFlag',
'Weight': 'weight',
}
def __init__(self, parent):
super(Ospfv3PseudoInterface, self).__init__(parent)
@property
def AdjSID(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): An Adjacency Segment Identifier (Adj-SID) represents a router adjacency in Segment Routing
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdjSID']))
@property
def BFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): B Flag: Backup Flag: If set, the Adj-SID refers to an adjacency that is eligible for protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BFlag']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableAdjSID(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Makes the Adjacency Segment Identifier (Adj-SID) available
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAdjSID']))
@property
def GFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): G-Flag: Group Flag: If set, the G-Flag indicates that the Adj-SID refers to a group of adjacencies where it may be assigned
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GFlag']))
@property
def LFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): L-Flag: Local Flag. If set, then the value/index carried by the SID has local significance
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LFlag']))
@property
def Metric(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Link Metric
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Metric']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): P-Flag:Persistent Flag: If set, the SID is persistently allocated. The SID value remains consistent across router restart and session/interface flap
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PFlag']))
@property
def VFlag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): V-Flag: Value flag. If set, then the SID carries an absolute value label value
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VFlag']))
@property
def Weight(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Weight of the SID for the purpose of load balancing
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Weight']))
def update(self, Name=None):
"""Updates ospfv3PseudoInterface resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves ospfv3PseudoInterface resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ospfv3PseudoInterface resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ospfv3PseudoInterface resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching ospfv3PseudoInterface resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ospfv3PseudoInterface data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ospfv3PseudoInterface resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, AdjSID=None, BFlag=None, EnableAdjSID=None, GFlag=None, LFlag=None, Metric=None, PFlag=None, VFlag=None, Weight=None):
"""Base class infrastructure that gets a list of ospfv3PseudoInterface device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- AdjSID (str): optional regex of adjSID
- BFlag (str): optional regex of bFlag
- EnableAdjSID (str): optional regex of enableAdjSID
- GFlag (str): optional regex of gFlag
- LFlag (str): optional regex of lFlag
- Metric (str): optional regex of metric
- PFlag (str): optional regex of pFlag
- VFlag (str): optional regex of vFlag
- Weight (str): optional regex of weight
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('abort', payload=payload, response_object=None)
def Disconnect(self, *args, **kwargs):
"""Executes the disconnect operation on the server.
Disconnect Simulated Interface
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
disconnect(SessionIndices=list)
-------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
disconnect(SessionIndices=string)
---------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disconnect', payload=payload, response_object=None)
def Reconnect(self, *args, **kwargs):
"""Executes the reconnect operation on the server.
Reconnect Simulated Interface
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
reconnect(SessionIndices=list)
------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
reconnect(SessionIndices=string)
--------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('reconnect', payload=payload, response_object=None)
def Start(self):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('stop', payload=payload, response_object=None)
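
# A hedged usage sketch (not part of the generated class): it illustrates the regex
# semantics documented in find() above. The resource instance and the values below
# are placeholders, not objects defined in this module.
#
#     interfaces = ospfv3_pseudo_interface.find(Name='^PI-1$')   # ^...$ gives an exact match
#     device_ids = interfaces.get_device_ids(Metric='^10$')      # regex filter on multivalues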
| [
"[email protected]"
] | |
43b9a78891cc24a21f373baf585e2e5eac2ae706 | b872ccff0c2f79886c0136b32da5f04cb8d3276c | /etcewrappers/emane/emaneshsnapshot.py | 41b82af702fc1382f8e41bee540758ecd9d07d0d | [] | no_license | prj8121/python-etce | 9c22b3a182f103f46b1d865d13ded277482e4a34 | bbd74a65280a09f3edc05457961b8c51ec009165 | refs/heads/master | 2022-11-18T05:19:19.324966 | 2020-04-02T15:15:47 | 2020-04-02T15:15:47 | 276,674,792 | 0 | 0 | null | 2020-07-02T14:57:07 | 2020-07-02T14:57:06 | null | UTF-8 | Python | false | false | 6,808 | py | #
# Copyright (c) 2015-2018 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
import os.path
from etce.wrapper import Wrapper
try:
from emanesh.emaneshell import ControlPortClient
except:
from emane.shell import ControlPortClient
class EmaneshSnapshot(Wrapper):
"""
Log the stats, tables and config of a running emulator
instance to file.
"""
def register(self, registrar):
registrar.register_infile_name('emaneshsnapshot.flag')
registrar.register_argument('controlportendpoint',
'127.0.0.1:47000',
'The control port endpoint of the target ' \
'EMANE instance.')
def run(self, ctx):
# query emane instance if there is a local platform file
if not ctx.args.infile:
return
cp = None
try:
layermapping = {}
ipaddr,port = ctx.args.controlportendpoint.split(':')
cp = ControlPortClient(ipaddr, int(port))
logdirectory = ctx.args.logdirectory
# nem 3 shim0(phyapitestshim) phy(universalphy)
showfile = os.path.join(logdirectory, 'emaneshow.log')
with open(showfile, 'w') as showf:
for nemid, layertuples in list(cp.getManifest().items()):
layermapping[nemid] = []
line = 'nem %d ' % nemid
for buildid,layertype,layername in layertuples:
layerlabel = '%d-%s' % (buildid, layertype.lower())
layermapping[nemid].append((buildid, layertype.lower(), layerlabel))
line += ' %s(%s)' % (layertype.lower(), layername)
showf.write(line + '\n')
# statistics
statsfile = os.path.join(logdirectory, 'emanestats.log')
with open(statsfile, 'w') as sf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,_,layerlabel in layertuples:
for statname,statval in sorted(cp.getStatistic(buildid).items()):
sf.write('nem %d %s %s = %s\n' % (nemid, layerlabel, statname, str(statval[0])))
# emulator
for statname,statval in sorted(cp.getStatistic(0).items()):
sf.write('emulator %s = %s\n' % (statname, str(statval[0])))
# configuration
configfile = os.path.join(logdirectory, 'emaneconfig.log')
with open(configfile, 'w') as sf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,_,layerlabel in layertuples:
for configname,configvaltuples in sorted(cp.getConfiguration(buildid).items()):
configvalstr = ''
if configvaltuples:
configvalstr = ','.join(map(str, list(zip(*configvaltuples))[0]))
sf.write('nem %d %s %s = %s\n' % (nemid, layerlabel, configname, configvalstr))
# emulator
for configname,configvaltuples in sorted(cp.getConfiguration(0).items()):
configvalstr = ''
if configvaltuples:
configvalstr = ','.join(map(str, list(zip(*configvaltuples))[0]))
sf.write('emulator %s = %s\n' % (configname, configvalstr))
# statistic tables
tablefile = os.path.join(logdirectory, 'emanetables.log')
with open(tablefile, 'w') as tf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,layertype,_ in layertuples:
for tablename,data in sorted(cp.getStatisticTable(buildid).items()):
tf.write('nem %d %s %s\n' % (nemid, layertype, tablename))
self.write_table_cells(tf, data)
# emulator
for tablename,data in sorted(cp.getStatisticTable(0).items()):
tf.write('emulator %s\n' % tablename)
self.write_table_cells(tf, data)
finally:
if cp:
cp.stop()
def write_table_cells(self, tf, data):
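        """Write one statistic table to the file object tf as a simple ASCII grid.

        data is one value of the dict returned by ControlPortClient.getStatisticTable():
        a (labels, rows) pair where labels holds the column names and each row is a
        sequence of (value, ...) tuples whose first element is the cell value.
        """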
labels,rowtuples = data
        widths = []
for label in labels:
widths.append(len(label))
rows = []
for rowtuple in rowtuples:
rows.append(list(map(str, list(zip(*rowtuple))[0])))
for row in rows:
for i,value in enumerate(row):
widths[i] = max(widths[i],len(value))
line = ''
for i,label in enumerate(labels):
line += '|' + label.ljust(widths[i])
line += "|\n"
tf.write(line)
for row in rows:
line = ''
for i,value in enumerate(row):
line += '|' + value.rjust(widths[i])
line += "|\n"
tf.write(line)
tf.write('\n')
def stop(self, ctx):
pass
| [
"[email protected]"
] | |
2825170807eb9df9a190facef2b577aa2de44ffe | 764f63ef031b38bde74657cd9bd198014ecfa0c7 | /alien.py | 82a55cbe8717ccdbb9cc68b2ba4f7b9fe6945b1e | [] | no_license | turkey66/alien_invasion | 4396668a8ca9737d340da9532d448cc90c7c3ed6 | dae2df7175a885c32951804c6ede6c53cedc749f | refs/heads/master | 2020-03-20T20:03:12.467496 | 2018-06-17T16:17:35 | 2018-06-17T16:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""表示单个外星人的类"""
def __init__(self, ai_settings, screen):
"""初始化外星人并设置其起始位置"""
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
# 加载外星人图像,并设置其rect属性
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
# 每个外星人最初都在屏幕左上角附近
self.rect.x = self.rect.width
self.rect.y = self.rect.height
# 储存外星人的准确位置
self.x = float(self.rect.x)
def check_edges(self):
"""如果外星人位于屏幕边缘,就返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
"""向右或向左移动外星人"""
self.x += self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction
self.rect.x = self.x
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect) | [
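
# A minimal usage sketch (not part of this module): in the full game the fleet-building
# code creates the instances and drives them once per frame, roughly like this:
#
#     aliens = pygame.sprite.Group()
#     aliens.add(Alien(ai_settings, screen))
#     ...
#     aliens.update()                      # move the whole fleet
#     for alien in aliens.sprites():
#         alien.blitme()                   # draw each alien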
"[email protected]"
] | |
ec12cb00959d4c864928b34ae07a95a09be47e7e | e0934ca26ac6c3f8816952ceafb3c84ace34d6aa | /resources/lib/services/nfsession/nfsession_ops.py | 1f8a2160764d032869515e944132a3d60cbd33c4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | masQelec/plugin.video.netflix | 62e4f6416c0f9bf547a8a5980de9747f677236b6 | 90ebf3343ebeaf6b790fdb1048d78fe5bf127dde | refs/heads/master | 2023-02-09T17:07:36.200308 | 2021-01-04T01:36:55 | 2021-01-04T01:36:55 | 288,575,867 | 0 | 0 | MIT | 2021-01-04T01:36:56 | 2020-08-18T22:17:06 | Python | UTF-8 | Python | false | false | 14,863 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2020 Stefano Gottardo (original implementation module)
Provides methods to perform operations within the Netflix session
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import time
from datetime import datetime, timedelta
import xbmc
import resources.lib.common as common
import resources.lib.utils.website as website
from resources.lib.common import cache_utils
from resources.lib.common.exceptions import (NotLoggedInError, MissingCredentialsError, WebsiteParsingError,
MbrStatusAnonymousError, MetadataNotAvailable, LoginValidateError,
HttpError401, InvalidProfilesError)
from resources.lib.globals import G
from resources.lib.kodi import ui
from resources.lib.services.nfsession.session.path_requests import SessionPathRequests
from resources.lib.utils import cookies
from resources.lib.utils.logging import LOG, measure_exec_time_decorator
class NFSessionOperations(SessionPathRequests):
"""Provides methods to perform operations within the Netflix session"""
def __init__(self):
super().__init__()
# Slot allocation for IPC
self.slots = [
self.get_safe,
self.post_safe,
self.login,
self.login_auth_data,
self.logout,
self.path_request,
self.perpetual_path_request,
self.callpath_request,
self.fetch_initial_page,
self.refresh_session_data,
self.activate_profile,
self.parental_control_data,
self.get_metadata,
self.update_loco_context,
self.update_videoid_bookmark
]
# Share the activate profile function to SessionBase class
self.external_func_activate_profile = self.activate_profile
self.dt_initial_page_prefetch = None
# Try prefetch login
if self.prefetch_login():
try:
# Try prefetch initial page
response = self.get_safe('browse')
api_data = website.extract_session_data(response, update_profiles=True)
self.auth_url = api_data['auth_url']
self.dt_initial_page_prefetch = datetime.now()
except Exception as exc: # pylint: disable=broad-except
LOG.warn('Prefetch initial page failed: {}', exc)
@measure_exec_time_decorator(is_immediate=True)
def fetch_initial_page(self):
"""Fetch initial page"""
        # It is mandatory to fetch the initial page data at every add-on startup to prevent/check possible side effects:
        # - Check if the account subscription is regular
        # - Avoid a TooManyRedirects error, which can happen when the profile used in the NF session no longer exists
# - Refresh the session data
# - Update the profiles (and sanitize related features) without submitting another request
if self.dt_initial_page_prefetch and datetime.now() <= self.dt_initial_page_prefetch + timedelta(minutes=30):
            # We do not know if/when the user will open the add-on, and some users leave the device turned on for
            # more than 24h, so we limit the prefetch validity to 30 minutes
self.dt_initial_page_prefetch = None
return
LOG.debug('Fetch initial page')
from requests import exceptions
try:
self.refresh_session_data(True)
except exceptions.TooManyRedirects:
            # This error can happen when the profile used in the NF session no longer exists:
            # something goes wrong in the session and the server tries to redirect to the login page without success.
            # (CastagnaIT: I don't know the best way to handle this borderline case, but logging in again works)
self.session.cookies.clear()
self.login()
def refresh_session_data(self, update_profiles):
response = self.get_safe('browse')
api_data = self.website_extract_session_data(response, update_profiles=update_profiles)
self.auth_url = api_data['auth_url']
@measure_exec_time_decorator(is_immediate=True)
def activate_profile(self, guid):
"""Set the profile identified by guid as active"""
LOG.debug('Switching to profile {}', guid)
current_active_guid = G.LOCAL_DB.get_active_profile_guid()
if guid == current_active_guid:
LOG.info('The profile guid {} is already set, activation not needed.', guid)
return
if xbmc.Player().isPlayingVideo():
            # Changing the current profile while a video is playing can cause problems with outgoing HTTP requests
            # (MSL/NFSession), causing the HTTP request to fail or data to be sent on the wrong profile
            raise Warning('It is not possible to select a profile while a video is playing.')
timestamp = time.time()
LOG.info('Activating profile {}', guid)
# 20/05/2020 - The method 1 not more working for switching PIN locked profiles
# INIT Method 1 - HTTP mode
# response = self._get('switch_profile', params={'tkn': guid})
# self.nfsession.auth_url = self.website_extract_session_data(response)['auth_url']
# END Method 1
# INIT Method 2 - API mode
try:
self.get_safe(endpoint='activate_profile',
params={'switchProfileGuid': guid,
'_': int(timestamp * 1000),
'authURL': self.auth_url})
except HttpError401 as exc:
# Profile guid not more valid
raise InvalidProfilesError('Unable to access to the selected profile.') from exc
# Retrieve browse page to update authURL
response = self.get_safe('browse')
self.auth_url = website.extract_session_data(response)['auth_url']
# END Method 2
G.LOCAL_DB.switch_active_profile(guid)
G.CACHE_MANAGEMENT.identifier_prefix = guid
cookies.save(self.session.cookies)
def parental_control_data(self, guid, password):
# Ask to the service if password is right and get the PIN status
from requests import exceptions
try:
response = self.post_safe('profile_hub',
data={'destination': 'contentRestrictions',
'guid': guid,
'password': password,
'task': 'auth'})
if response.get('status') != 'ok':
LOG.warn('Parental control status issue: {}', response)
raise MissingCredentialsError
except exceptions.HTTPError as exc:
if exc.response.status_code == 500:
# This endpoint raise HTTP error 500 when the password is wrong
raise MissingCredentialsError from exc
raise
# Warning - parental control levels vary by country or region, no fixed values can be used
# Note: The language of descriptions change in base of the language of selected profile
response_content = self.get_safe('restrictions',
data={'password': password},
append_to_address=guid)
extracted_content = website.extract_parental_control_data(response_content, response['maturity'])
response['profileInfo']['profileName'] = website.parse_html(response['profileInfo']['profileName'])
extracted_content['data'] = response
return extracted_content
def website_extract_session_data(self, content, **kwargs):
"""Extract session data and handle errors"""
try:
return website.extract_session_data(content, **kwargs)
except WebsiteParsingError as exc:
LOG.error('An error occurs in extract session data: {}', exc)
raise
except (LoginValidateError, MbrStatusAnonymousError) as exc:
LOG.warn('The session data is not more valid ({})', type(exc).__name__)
common.purge_credentials()
self.session.cookies.clear()
common.send_signal(signal=common.Signals.CLEAR_USER_ID_TOKENS)
raise NotLoggedInError from exc
@measure_exec_time_decorator(is_immediate=True)
def get_metadata(self, videoid, refresh=False):
"""Retrieve additional metadata for the given VideoId"""
if isinstance(videoid, list): # IPC call send the videoid as "path" list
videoid = common.VideoId.from_path(videoid)
# Get the parent VideoId (when the 'videoid' is a type of EPISODE/SEASON)
parent_videoid = videoid.derive_parent(common.VideoId.SHOW)
# Delete the cache if we need to refresh the all metadata
if refresh:
G.CACHE.delete(cache_utils.CACHE_METADATA, str(parent_videoid))
if videoid.mediatype == common.VideoId.EPISODE:
try:
metadata_data = self._episode_metadata(videoid, parent_videoid)
except KeyError as exc:
                # The episode metadata does not exist (case of a new episode with outdated cached data)
                # In this case, delete the cache entry and try again safely
LOG.debug('find_episode_metadata raised an error: {}, refreshing cache', exc)
try:
metadata_data = self._episode_metadata(videoid, parent_videoid, refresh_cache=True)
except KeyError as exc:
# The new metadata does not contain the episode
LOG.error('Episode metadata not found, find_episode_metadata raised an error: {}', exc)
raise MetadataNotAvailable from exc
else:
metadata_data = self._metadata(video_id=parent_videoid), None
return metadata_data
def _episode_metadata(self, episode_videoid, tvshow_videoid, refresh_cache=False):
if refresh_cache:
G.CACHE.delete(cache_utils.CACHE_METADATA, str(tvshow_videoid))
show_metadata = self._metadata(video_id=tvshow_videoid)
episode_metadata, season_metadata = common.find_episode_metadata(episode_videoid, show_metadata)
return episode_metadata, season_metadata, show_metadata
@cache_utils.cache_output(cache_utils.CACHE_METADATA, identify_from_kwarg_name='video_id', ignore_self_class=True)
def _metadata(self, video_id):
"""Retrieve additional metadata for a video.
This is a separate method from get_metadata() to work around caching issues
when new episodes are added to a tv show by Netflix."""
LOG.debug('Requesting metadata for {}', video_id)
metadata_data = self.get_safe(endpoint='metadata',
params={'movieid': video_id.value,
'_': int(time.time() * 1000)})
if not metadata_data:
            # This returns empty:
            # - if the metadata is no longer available
            # - if a tv show/movie has been exported from a specific language profile and is not
            #   available to profiles with other languages
raise MetadataNotAvailable
return metadata_data['video']
def update_loco_context(self, context_name):
"""Update a loco list by context"""
        # Calling this api no longer seems to be needed to update the continueWatching loco list
# Get current loco root data
loco_data = self.path_request([['loco', [context_name], ['context', 'id', 'index']]])
loco_root = loco_data['loco'][1]
if 'continueWatching' in loco_data['locos'][loco_root]:
context_index = loco_data['locos'][loco_root]['continueWatching'][2]
context_id = loco_data['locos'][loco_root][context_index][1]
else:
# In the new profiles, there is no 'continueWatching' list and no list is returned
LOG.warn('update_loco_context: Update skipped due to missing context {}', context_name)
return
path = [['locos', loco_root, 'refreshListByContext']]
        # After the introduction of LoCo, the following notes (which refer to the old LoLoMo) need to be reviewed:
        # The fourth parameter is like a request-id, but it does not seem to match
        # serverDefs/date/requestId of reactContext nor the request_id of the video event request.
        # It seems to have some kind of relationship with renoMessageId, possibly with the logblob, but I am not sure.
        # I also noticed that this request can be made with the fourth parameter empty.
params = [common.enclose_quotes(context_id),
context_index,
common.enclose_quotes(context_name),
'']
# path_suffixs = [
# [{'from': 0, 'to': 100}, 'itemSummary'],
# [['componentSummary']]
# ]
try:
response = self.callpath_request(path, params)
LOG.debug('refreshListByContext response: {}', response)
            # The call response returns the new context id of the previously invalidated loco context_id
            # and, if path_suffixs is added, also returns the new video list data
except Exception as exc: # pylint: disable=broad-except
LOG.warn('refreshListByContext failed: {}', exc)
if not LOG.level == LOG.LEVEL_VERBOSE:
return
ui.show_notification(title=common.get_local_string(30105),
                                 msg='An error prevented updating the loco context on Netflix',
time=10000)
def update_videoid_bookmark(self, video_id):
"""Update the videoid bookmark position"""
        # You can check that this function works through the official android app
        # by checking whether the red bar showing the watched time position appears and gets updated, or
        # whether the continueWatching list gets updated (e.g. try to play a new tvshow not contained in "my list")
call_paths = [['refreshVideoCurrentPositions']]
params = ['[' + video_id + ']', '[]']
try:
response = self.callpath_request(call_paths, params)
LOG.debug('refreshVideoCurrentPositions response: {}', response)
except Exception as exc: # pylint: disable=broad-except
LOG.warn('refreshVideoCurrentPositions failed: {}', exc)
ui.show_notification(title=common.get_local_string(30105),
msg='An error prevented the update the status watched on Netflix',
time=10000)
| [
"[email protected]"
] | |
100386502e3c3fa6f1d4f6adbb120b741631dee0 | 113f803b721984992bdc8f2177056b0f60de546a | /ex20/ex20.py | 196cbc7f5b0a27f8d770e8b025470d262aa428c5 | [] | no_license | Na-Young-Lee/16PFA-Na-Young-Lee | ddb215b0dc9cb0572b96aa90d8db71fbbea13c13 | 6cdcea12fd46a5218f9b6a7cd4ac5ee5e347cbb7 | refs/heads/master | 2021-01-17T15:15:27.429510 | 2016-06-01T11:37:20 | 2016-06-01T11:37:20 | 53,923,583 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 1,160 | py | #-*-coding:cp949
from sys import argv # 시스템으로부터 매개변수를 가져온다
script, input_file = argv # 매개변수는 input_file(test.txt)이다.
def print_all(f): # 함수 print_all, f는 변수
print(f.read()) # 이 파일의 모든 내용을 읽어라
def rewind(f): # 함수 rewind, f는 변수
f.seek(0) # 시작 위치로 돌아감.
# seek(): 자기 테이프 위에 원하는 위차를 지정하기 위함.
# seek(n): 시작위치로부터 n byte 위치의 자료를 읽기/쓰기 준비하라는 의미
def print_a_line(line_count,f):
print("%d %s" % (line_count, f.readline())) # read_line 은 파일을 한 행씩 읽으라는 의미
current_file = open(input_file)
print ("First let's print the whole file:굈")
print_all(current_file) # print_all 호출
print ("Now let's rewind, kind of like a tape.")
rewind(current_file) # rewind 호출
print("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line +1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file) | [
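
# A small optional demo (not part of the original exercise): it shows seek() with a
# nonzero offset using an in-memory file, so it does not depend on the input file.
if __name__ == '__main__':
    from io import StringIO
    demo = StringIO(u"abcdef")
    demo.seek(3)        # jump 3 characters past the start
    print(demo.read())  # prints "def"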
"CAD Client"
] | CAD Client |
63da80f93496f9f06ca03c36ca38b215fd4ab8d9 | d7a4701e18be0f38820f5c15d80099fda6385f9f | /ABC106/B.py | dd13915b9a187916ed4f7f263d2be9013e8353ba | [] | no_license | shiki7/Atcoder | 979a6f0eeb65f3704ea20a949940a0d5e3434579 | c215c02d3bfe1e9d68846095b1bd706bd4557dd0 | refs/heads/master | 2022-05-21T16:59:01.529489 | 2022-04-29T11:26:42 | 2022-04-29T11:26:42 | 201,536,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | N = int(input())
def is_divisor_counter8(num):
count = 0
for i in range(1, num+1):
if num % i == 0:
count += 1
return True if count == 8 else False
count = 0
for i in range(1, N+1, 2):
if is_divisor_counter8(i):
count += 1
print(count)
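
# A hedged alternative (not part of the original submission): counting divisors in
# pairs up to sqrt(num) gives the same result with fewer iterations.
def count_divisors(num):
    count = 0
    i = 1
    while i * i <= num:
        if num % i == 0:
            count += 1 if i * i == num else 2
        i += 1
    return count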
| [
"[email protected]"
] | |
d06f80d7cbcfeb09aca48198e849b2aeff779bcd | 1cff45f4b9beab91e06c30f4f2de2d719706fcdd | /tools/log2thbrep.py | a1b5b9a2906431d63e81738290657503cf7cb0ac | [] | no_license | chilamkatana/thbattle | 2c1b30bd243a216454e075f636b5c92c1df77f86 | 5219509c58f1b96bfd431f84e405f4f9aa981809 | refs/heads/master | 2021-01-18T00:07:46.283892 | 2016-06-19T15:12:30 | 2016-06-19T15:12:30 | 62,997,020 | 1 | 0 | null | 2016-07-10T12:13:04 | 2016-07-10T12:13:03 | null | UTF-8 | Python | false | false | 2,158 | py | # -*- coding: utf-8 -*-
# -- prioritized --
import sys
sys.path.append('../src')
# -- stdlib --
from urlparse import urljoin
import argparse
import gzip
import json
# -- third party --
# -- own --
from client.core.replay import Replay
from game import autoenv
from settings import ACCOUNT_FORUMURL
# -- code --
def gen_fake_account(name, is_freeplay):
if is_freeplay:
acc = ['freeplay', 1, name]
else:
acc = ['forum', 1, name, {
'title': u'转换的Replay只有名字啊……',
'avatar': urljoin(ACCOUNT_FORUMURL, '/maoyu.png'),
'credits': 1000,
'games': 0,
'drops': 0,
'badges': [],
}]
return {'account': acc, 'state': 'ingame'}
def main():
autoenv.init('Client')
parser = argparse.ArgumentParser('log2thbrep')
parser.add_argument('replay_file', help='Server side replay')
parser.add_argument('client_version', help='Desired client version (git commit)')
parser.add_argument('--freeplay', action='store_true', help='Use freeplay account module?')
options = parser.parse_args()
if options.replay_file.endswith('.gz'):
data = gzip.open(options.replay_file, 'r').read()
else:
data = open(options.replay_file, 'r').read()
data = data.decode('utf-8').split('\n')
names = data.pop(0)[2:].split(', ') # Names
data.pop(0) # Ver
gid = int(data.pop(0).split()[-1]) # GameId
data.pop(0) # Time
game_mode, game_params, rnd_seed, usergdhist, gdhist = data
gdhist = json.loads(gdhist)
game_params = json.loads(game_params)
rep = Replay()
rep.client_version = options.client_version
rep.game_mode = game_mode
rep.game_params = game_params
rep.users = [gen_fake_account(i, options.freeplay) for i in names]
assert len(names) == len(gdhist), [names, len(gdhist)]
for i, gd in enumerate(gdhist):
fn = '%s_%s.thbrep' % (gid, i)
with open(fn, 'w') as f:
print 'Writing %s...' % fn
rep.me_index = i
rep.gamedata = gd
f.write(rep.dumps())
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3c12d70f689a90719f80a3bd3c5077bf23834e0f | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OEFModel/Classes/Dummy.py | e96d20609045111125c43f47cdf1e3ee6f8fca51 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 1,195 | py | # coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
from OTLMOW.OEFModel.EMAttribuut import EMAttribuut
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OEFClassCreator. To modify: extend, do not edit
class Dummy(EMObject):
"""DUMMY Installatie"""
typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#Dummy'
label = 'Dummy'
def __init__(self):
super().__init__()
self._notitieDummy = EMAttribuut(field=StringField,
naam='notitie (DUMMY)',
label='notitie (DUMMY)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Dummy.notitieDummy',
definitie='Definitie nog toe te voegen voor eigenschap notitie (DUMMY)',
owner=self)
@property
def notitieDummy(self):
"""Definitie nog toe te voegen voor eigenschap notitie (DUMMY)"""
return self._notitieDummy.waarde
@notitieDummy.setter
def notitieDummy(self, value):
self._notitieDummy.set_waarde(value, owner=self)
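
# A hedged usage sketch (not part of the generated model code): the EMAttribuut is
# wrapped in a plain property, so callers read and write it directly, e.g.
#
#     d = Dummy()
#     d.notitieDummy = 'vrije notitie'   # illustrative value
#     print(d.notitieDummy)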
| [
"[email protected]"
] | |
f86c3abe1a96c40b82537c05c9790353dd1c4268 | 3dd43ff0dab514a39f611487ab421256b3b5b13b | /scripts/client/bootcamp/BootcampGarage.py | b78fd948610779a2240fe345836bca9b820783ad | [] | no_license | kusaku/wotscripts | 04ab289e3fec134e290355ecf81cf703af189f72 | a89c2f825d3c7dade7bc5163a6c04e7f5bab587d | refs/heads/master | 2023-08-20T00:17:36.852522 | 2018-02-26T14:53:44 | 2018-02-26T14:53:44 | 80,610,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,786 | py | # Embedded file name: scripts/client/bootcamp/BootcampGarage.py
import BigWorld
from functools import partial
from BootCampEvents import g_bootcampEvents
from BootcampConstants import BOOTCAMP_MESSAGE_WINDOW, MESSAGE_BOTTOM_RENDERER, getBootcampInternalHideElementName
from constants import QUEUE_TYPE
from debug_utils_bootcamp import LOG_DEBUG_DEV_BOOTCAMP, LOG_CURRENT_EXCEPTION_BOOTCAMP, LOG_ERROR_BOOTCAMP
from helpers import dependency, aop
from skeletons.gui.game_control import IBootcampController
from skeletons.gui.shared import IItemsCache
from helpers.i18n import makeString
from math import ceil
from helpers import i18n, time_utils
from copy import deepcopy
from gui.Scaleform.genConsts.NODE_STATE_FLAGS import NODE_STATE_FLAGS
from gui.Scaleform.daapi.view.lobby.techtree.settings import NODE_STATE
from nations import NAMES as NATION_NAMES
from gui.prb_control.events_dispatcher import g_eventDispatcher
from gui.prb_control import prbEntityProperty
from gui.Scaleform.daapi.view.lobby.header import battle_selector_items
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.shared import events, g_eventBus, EVENT_BUS_SCOPE
from PlayerEvents import g_playerEvents
from BootcampGarageLessons import ACTION_PARAM, LESSON_PARAM, GarageLessons, GarageActions
from Bootcamp import g_bootcamp
from gui.Scaleform.Waiting import Waiting
from shared_utils import BoundMethodWeakref
from aop.in_garage import PointcutBattleSelectorHintText
from adisp import process, async
import SoundGroups
class MakeSandboxSelectedAspect(aop.Aspect):
def atReturn(self, cd):
original_return_value = cd.returned
original_return_value.entityTypeID = QUEUE_TYPE.SANDBOX
return original_return_value
class MakeSandboxSelected(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.prb_control.dispatcher', '_PreBattleDispatcher', 'getFunctionalState', aspects=(MakeSandboxSelectedAspect,))
class ACTION_TYPE_NAME():
UNKNOWN_STR = 'unknown'
INIT_STR = 'init'
SHOW_MESSAGE_STR = 'msg'
SHOW_ELEMENT_STR = 'show'
HIGHLIGHT_BUTTON_STR = 'highlightButton'
CALLBACK_STR = 'callback'
CONDITIONAL_STR = 'conditional'
class ACTION_TYPE():
UNKNOWN = 0
CALLBACK = 1
SHOW_MESSAGE = 2
SHOW_ELEMENT = 3
HIGHLIGHT_BUTTON = 4
HIGHLIGHT_VEHICLE = 5
INIT = 6
CONDITIONAL = 7
def getActionType(actionName):
if actionName.startswith(ACTION_TYPE_NAME.CALLBACK_STR):
return ACTION_TYPE.CALLBACK
if actionName.startswith(ACTION_TYPE_NAME.SHOW_MESSAGE_STR):
return ACTION_TYPE.SHOW_MESSAGE
if actionName.startswith(ACTION_TYPE_NAME.SHOW_ELEMENT_STR):
return ACTION_TYPE.SHOW_ELEMENT
if actionName.startswith(ACTION_TYPE_NAME.HIGHLIGHT_BUTTON_STR):
return ACTION_TYPE.HIGHLIGHT_BUTTON
if actionName.startswith(ACTION_TYPE_NAME.INIT_STR):
return ACTION_TYPE.INIT
if actionName.startswith(ACTION_TYPE_NAME.CONDITIONAL_STR):
return ACTION_TYPE.CONDITIONAL
return ACTION_TYPE.UNKNOWN
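
# Examples (action names are illustrative): 'msgIntro' resolves to SHOW_MESSAGE,
# 'showResearchPanel' to SHOW_ELEMENT and 'highlightButtonBattle' to HIGHLIGHT_BUTTON;
# names with no known prefix fall back to UNKNOWN.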
class CyclicController():
def __init__(self):
self.__elements = set()
def checkCyclic(self, element, info):
LOG_DEBUG_DEV_BOOTCAMP('[checkCyclic] Element {0}'.format(element))
if element == '':
raise Exception('[checkCyclic] Empty element found!')
if element in self.__elements:
raise Exception('[checkCyclic] Infinite cycle found! Aborting! Info - {0}'.format(info))
self.__elements.add(element)
class BootcampGarageActions():
def __init__(self):
self.viewActions = {}
self.__garageLessons = GarageLessons()
self.__garageActions = GarageActions()
self.actionStart = None
self.actionFinish = None
self.__lessonFinished = False
return
def getLessonById(self, lessonId):
return self.__garageLessons.getLesson(lessonId)
def getBattleResultsById(self, lessonId):
return self.__garageLessons.getBattleResult(lessonId)
def getActionByName(self, actionName, isSaveToServer = True):
action = self.__garageActions.getAction(actionName)
if isSaveToServer:
self.__lessonFinished = actionName == self.actionFinish
if self.__lessonFinished or ACTION_PARAM.SAVE in action and action[ACTION_PARAM.SAVE]:
g_bootcampGarage.setCheckpoint(actionName)
g_bootcampGarage.saveCheckpointToServer()
if self.__lessonFinished:
g_bootcampGarage.clear()
return action
def isLessonFinished(self):
return self.__lessonFinished
def getViewActions(self, viewAlias):
return self.viewActions.get(viewAlias, []) + self.viewActions.get('all', [])
def setViewAction(self, viewAlias, action):
if viewAlias not in self.viewActions:
self.viewActions[viewAlias] = []
self.viewActions[viewAlias].append(action)
def resetViewAction(self, viewAlias):
if viewAlias in self.viewActions:
del self.viewActions[viewAlias]
def clearAllViewActions(self):
self.viewActions.clear()
class BootcampGarageLesson(object):
itemsCache = dependency.descriptor(IItemsCache)
bootcampCtrl = dependency.descriptor(IBootcampController)
def __init__(self):
self.__started = False
self.__lessonId = 0
self.__checkpoint = ''
self.__account = None
self.__lobbyAssistant = None
self.__bootcampGarageActions = None
self.__prevHint = None
self.__nextHint = None
self.__hardcodeHint = None
self.__secondVehicleInvID = None
self.__isInPreview = False
self.isLessonSuspended = False
self.__callbacks = {'onBattleReady': BoundMethodWeakref(self.onBattleReady),
'onBootcampFinished': BoundMethodWeakref(self.finishBootcamp),
'setBattleSelectorHintText': BoundMethodWeakref(self.setBattleSelectorHintText),
'clear': BoundMethodWeakref(self.clear),
'showFinalVideo': BoundMethodWeakref(self.showFinalVideo),
'disableResearchButton': BoundMethodWeakref(self.disableResearchButton),
'enableResearchButton': BoundMethodWeakref(self.enableResearchButton),
'disableVehiclePreviewBuyButton': BoundMethodWeakref(self.disableVehiclePreviewBuyButton),
'enableVehiclePreviewBuyButton': BoundMethodWeakref(self.enableVehiclePreviewBuyButton)}
self.__hintsLoaded = False
self.__hangarLoaded = False
self.__battleSelectorHintPointcutIndex = None
self._actions = {ACTION_TYPE.INIT: BoundMethodWeakref(self.initAction),
ACTION_TYPE.CALLBACK: BoundMethodWeakref(self.callbackAction),
ACTION_TYPE.SHOW_MESSAGE: BoundMethodWeakref(self.showMessage),
ACTION_TYPE.SHOW_ELEMENT: BoundMethodWeakref(self.showElement),
ACTION_TYPE.HIGHLIGHT_BUTTON: BoundMethodWeakref(self.highlightButton),
ACTION_TYPE.CONDITIONAL: BoundMethodWeakref(self.conditionalAction)}
self.__showActionsHistory = []
self.__deferredAliases = []
self.__itemCacheSyncCallbacks = []
return
@prbEntityProperty
def prbEntity(self):
pass
@property
def isLessonFinished(self):
garageActions = self.getBootcampGarageActions()
return garageActions.isLessonFinished()
@property
def canGoToBattle(self):
prbEntity = self.prbEntity
if prbEntity is None:
return False
else:
result = prbEntity.canPlayerDoAction()
canDo, canDoMsg = result.isValid, result.restriction
return canDo
def onBattleReady(self):
g_bootcampEvents.onBattleReady()
def clear(self):
self.__started = False
self.__prevHint = None
self.__nextHint = None
self.__hardcodeHint = None
self.isLessonSuspended = False
self.__account = None
self.__showActionsHistory = []
self.__hintsLoaded = False
self.__hangarLoaded = False
g_bootcamp.removePointcut(self.__battleSelectorHintPointcutIndex)
self.__battleSelectorHintPointcutIndex = None
self.__bootcampGarageActions.clearAllViewActions()
return
def getBootcampGarageActions(self):
if self.__bootcampGarageActions is None:
self.__bootcampGarageActions = BootcampGarageActions()
return self.__bootcampGarageActions
def start(self):
if self.__started:
return
else:
self.__started = True
self.isLessonSuspended = False
self.__showActionsHistory = []
self.__bootcampGarageActions = None
self.getBootcampGarageActions()
lesson = self.__bootcampGarageActions.getLessonById(self.__lessonId)
self.__bootcampGarageActions.actionStart = lesson[LESSON_PARAM.ACTION_START]
self.__bootcampGarageActions.actionFinish = lesson[LESSON_PARAM.ACTION_FINISH]
lastLesson = g_bootcamp.getContextIntParameter('lastLessonNum')
if self.__lessonId == lastLesson:
self.__prevHint = None
self.__nextHint = None
self.__hardcodeHint = None
if self.__checkpoint != '':
self.enableCheckpointGUI()
currentAction = self.runCheckpoint()
else:
currentAction = self.__bootcampGarageActions.actionStart
startCallback = self.getCallbackByName(currentAction)
startCallback()
if self.__lessonId != lastLesson:
self.setAllViewActions(currentAction)
if currentAction != self.__bootcampGarageActions.actionFinish:
g_bootcampEvents.onBattleNotReady()
g_bootcampEvents.onEnterPreview += self.onEnterPreview
g_bootcampEvents.onExitPreview += self.onExitPreview
return
def init(self, lessonId, account):
garageActions = self.getBootcampGarageActions()
garageActions.clearAllViewActions()
self.__checkpoint = ''
self.__prevHint = None
self.__nextHint = None
self.__hardcodeHint = None
self.__lessonId = lessonId
self.__account = account
self.__showActionsHistory = []
self.__secondVehicleInvID = None
self.__updateSecondVehicleInvID()
return
def onInventoryUpdate(self, _, __):
self.runViewAlias(VIEW_ALIAS.LOBBY_HANGAR)
def runCheckpoint(self):
LOG_DEBUG_DEV_BOOTCAMP('Run checkpoint {0}'.format(self.__checkpoint))
self.runCustomAction(self.__checkpoint)
return self.__checkpoint
def setCheckpoint(self, checkpoint):
self.__checkpoint = checkpoint
def saveCheckpointToServer(self):
if g_playerEvents.isPlayerEntityChanging:
LOG_DEBUG_DEV_BOOTCAMP('events.isPlayerEntityChanging', g_playerEvents.isPlayerEntityChanging)
return
else:
if self.__account is not None:
self.__account.base.saveBootcampCheckpoint(self.__checkpoint, self.__lessonId)
return
def enableCheckpointGUI(self):
actionStart = self.__bootcampGarageActions.actionStart
checkpoint = self.__checkpoint
currentAction = actionStart
visibleElements = []
cyclicController = CyclicController()
cyclicError = 'enableCheckpointGUI, lessonId - {0}, currentAction - {1}'
LOG_DEBUG_DEV_BOOTCAMP('Start to enableCheckpointGUI, lessonId - {0}, currentAction - {1}, checkpoint - {2}'.format(self.__lessonId, currentAction, self.__checkpoint))
while currentAction != checkpoint:
cyclicController.checkCyclic(currentAction, cyclicError.format(self.__lessonId, currentAction))
if currentAction is None:
return
currentAction = self.collectVisibleElements(currentAction, visibleElements)
for action in self.__showActionsHistory:
self.collectVisibleElements(action, visibleElements)
if visibleElements:
self.enableGarageGUIElements(visibleElements)
return
def collectVisibleElements(self, currentAction, visibleElements):
action = self.__bootcampGarageActions.getActionByName(currentAction, False)
nextHint = action[ACTION_PARAM.NEXT_HINT]
actionType = getActionType(currentAction)
elements = []
if actionType == ACTION_TYPE.INIT:
elements = action[ACTION_PARAM.VISIBLE].split(' ')
elif actionType == ACTION_TYPE.SHOW_ELEMENT:
elements = action[ACTION_PARAM.SHOW].split(' ')
if elements:
visibleElements.extend(elements)
return nextHint
def runViewAlias(self, viewAlias):
lastLesson = g_bootcamp.getContextIntParameter('lastLessonNum')
if not self.isLessonSuspended and self.__lessonId != lastLesson:
viewActions = self.__bootcampGarageActions.getViewActions(viewAlias)
for action in viewActions:
name = action[ACTION_PARAM.NAME]
condition = action['show_condition']['condition']
nationData = self.getNationData()
if getActionType(name) == ACTION_TYPE.SHOW_MESSAGE:
if name in self.__showActionsHistory:
continue
if condition.recheckOnItemSync() and viewAlias not in self.__deferredAliases:
self.__deferredAliases.append(viewAlias)
if condition.checkCondition(nationData) == action['show_condition']['result']:
if 'prevHint' in action['show_condition']:
if self.__prevHint != action['show_condition']['prevHint']:
continue
if getActionType(name) == ACTION_TYPE.HIGHLIGHT_BUTTON and name == self.__prevHint:
return True
self.hideAllHints()
callbackAction = self.getCallbackByName(name)
callbackAction()
return True
return False
def hideAllHints(self):
self.__prevHint = None
self.__hardcodeHint = None
g_eventBus.handleEvent(events.LoadViewEvent(events.BootcampEvent.REMOVE_ALL_HIGHLIGHTS, None, None), EVENT_BUS_SCOPE.LOBBY)
return
@process
def onViewLoaded(self, view, _):
if view is not None and view.settings is not None:
alias = view.settings.alias
doStart = False
if not self.__hintsLoaded and alias == VIEW_ALIAS.BOOTCAMP_LOBBY_HIGHLIGHTS:
self.__hintsLoaded = True
doStart = self.__hangarLoaded
if not self.__hangarLoaded and alias == VIEW_ALIAS.LOBBY_HANGAR:
self.__hangarLoaded = True
doStart = self.__hintsLoaded
if doStart:
self.start()
return
if alias != VIEW_ALIAS.LOBBY_MENU and alias != VIEW_ALIAS.BOOTCAMP_MESSAGE_WINDOW:
if self.isLessonSuspended and alias == VIEW_ALIAS.LOBBY_HANGAR:
secondVehicleSelected = yield self.isSecondVehicleSelectedAsync()
if not secondVehicleSelected:
self.checkSecondVehicleHintEnabled()
self.highlightLobbyHint('SecondTank', True, True)
return
if self.__checkpoint != self.__bootcampGarageActions.actionFinish:
visibleElements = []
for action in self.__showActionsHistory:
self.collectVisibleElements(action, visibleElements)
if visibleElements:
self.changeGarageGUIElementsVisibility(visibleElements, False)
self.hideExcessElements()
foundHints = False
VIEWS_WITH_CONDITIONAL_HINTS = (VIEW_ALIAS.PERSONAL_CASE, VIEW_ALIAS.VEHICLE_PREVIEW)
if alias not in VIEWS_WITH_CONDITIONAL_HINTS:
foundHints = self.runViewAlias(alias)
VIEWS_TO_HIGHLIGHT_RETURN_FROM = (VIEW_ALIAS.LOBBY_RESEARCH, VIEW_ALIAS.LOBBY_TECHTREE)
if not foundHints and alias in VIEWS_TO_HIGHLIGHT_RETURN_FROM:
self.checkReturnToHangar()
elif self.canGoToBattle:
LOG_DEBUG_DEV_BOOTCAMP('onViewLoaded - calling onBattleReady (view {0})'.format(alias))
self.onBattleReady()
return
def onViewClosed(self, viewAlias):
if viewAlias in self.__deferredAliases:
self.__deferredAliases.remove(viewAlias)
def setAllViewActions(self, currentActionName):
cyclicController = CyclicController()
cyclicError = 'setAllViewActions, lessonId - {0}, currentAction - {1}'
LOG_DEBUG_DEV_BOOTCAMP('Start to setAllViewActions, lessonId - {0}, currentAction - {1}, checkpoint - {2}'.format(self.__lessonId, currentActionName, self.__bootcampGarageActions.actionFinish))
self.__bootcampGarageActions.clearAllViewActions()
allViewActionsSet = False
while not allViewActionsSet:
cyclicController.checkCyclic(currentActionName, cyclicError.format(self.__lessonId, currentActionName))
action = self.__bootcampGarageActions.getActionByName(currentActionName, False)
if 'show_condition' in action:
views = action['show_condition']['views']
for view in views:
self.__bootcampGarageActions.setViewAction(view, action)
if currentActionName == self.__bootcampGarageActions.actionFinish:
allViewActionsSet = True
if not allViewActionsSet:
currentActionName = action[ACTION_PARAM.NEXT_HINT]
def runCustomAction(self, customAction):
callback = self.getCallbackByName(customAction)
if callback:
callback()
def getCallbackByName(self, callbackName):
LOG_DEBUG_DEV_BOOTCAMP('__getCallback - {0}'.format(callbackName))
if self.isLessonSuspended:
return
elif not callbackName:
return
actionType = getActionType(callbackName)
action = self._actions.get(actionType, None)
if action:
return partial(action, callbackName)
else:
return
def initAction(self, actionName):
if actionName is None:
return
else:
initAction = self.__bootcampGarageActions.getActionByName(actionName)
visibleElements = initAction[ACTION_PARAM.VISIBLE].split(' ')
if visibleElements:
self.enableGarageGUIElements(visibleElements)
nextHint = initAction[ACTION_PARAM.NEXT_HINT]
callback = self.getCallbackByName(nextHint)
if callback is not None:
callback()
return
def callbackAction(self, actionName):
if actionName is None:
return
else:
callbackAction = self.__bootcampGarageActions.getActionByName(actionName)
if callbackAction['callback'] in self.__callbacks:
callback = self.__callbacks[callbackAction['callback']]
callback()
nextHint = callbackAction[ACTION_PARAM.NEXT_HINT]
callback = self.getCallbackByName(nextHint)
if callback is not None:
callback()
return
def showMessage(self, actionName):
if actionName is None:
return
else:
ctx = {'messages': [],
'voiceovers': []}
actionType = ACTION_TYPE.SHOW_MESSAGE
cyclicController = CyclicController()
cyclicError = 'showMessage, lessonId - {0}, currentAction - {1}'
while actionType == ACTION_TYPE.SHOW_MESSAGE:
cyclicController.checkCyclic(actionName, cyclicError.format(self.__lessonId, actionName))
message = self.__bootcampGarageActions.getActionByName(actionName)
                nextHint = message[ACTION_PARAM.NEXT_HINT]
                nextHintType = getActionType(nextHint)
nation = NATION_NAMES[self.getNation()]
voiceover = message[ACTION_PARAM.NATIONS_DATA][nation].get(ACTION_PARAM.VOICEOVER, None)
if not voiceover:
voiceover = message.get(ACTION_PARAM.VOICEOVER, None)
ctx[ACTION_PARAM.VOICEOVERS].append(voiceover)
callback = None
                if nextHintType != ACTION_TYPE.SHOW_MESSAGE:
                    actionType = nextHintType
                    callback = self.getCallbackByName(nextHint)
if callback is not None:
ctx['removedCallback'] = callback
else:
                    actionName = nextHint
showBottomData = True
showRewardMessage = True
if not self.bootcampCtrl.needAwarding():
showBottomData = not bool(message[ACTION_PARAM.ONLY_FIRST_BOOTCAMP_BOTTOM])
showRewardMessage = not bool(message[ACTION_PARAM.ONLY_FIRST_BOOTCAMP])
if showRewardMessage:
ctx['messages'].append(self._createMessageContext(message, showBottomData))
elif callback is not None:
callback()
return
self.suspendLesson()
g_eventBus.handleEvent(events.LoadViewEvent(VIEW_ALIAS.BOOTCAMP_MESSAGE_WINDOW, None, ctx), EVENT_BUS_SCOPE.LOBBY)
return
def showElement(self, actionName):
if actionName is None or actionName in self.__showActionsHistory:
return
else:
self.__showActionsHistory.append(actionName)
showAction = self.__bootcampGarageActions.getActionByName(actionName)
voiceover = showAction.get(ACTION_PARAM.VOICEOVER, '')
if voiceover:
SoundGroups.g_instance.playSound2D(voiceover)
            nextHint = showAction[ACTION_PARAM.NEXT_HINT]
            callback = None
            if nextHint:
                callback = self.getCallbackByName(nextHint)
showElements = showAction[ACTION_PARAM.SHOW].split(' ')
if showElements:
self.showGarageGUIElements(showElements, callback)
if showAction[ACTION_PARAM.FORCE]:
self.showNextHint()
else:
self.suspendLesson()
return
def highlightButton(self, actionName, hideHint = False):
if actionName is None:
return
else:
highlight = self.__bootcampGarageActions.getActionByName(actionName)
hintAction = events.BootcampEvent.REMOVE_HIGHLIGHT
highlightElement = highlight[ACTION_PARAM.ELEMENT]
if not hideHint:
hintAction = events.BootcampEvent.ADD_HIGHLIGHT
self.__prevHint = actionName
self.__nextHint = highlight[ACTION_PARAM.NEXT_HINT]
self.__hardcodeHint = None
g_eventBus.handleEvent(events.LoadViewEvent(hintAction, None, highlightElement), EVENT_BUS_SCOPE.LOBBY)
if highlight[ACTION_PARAM.FORCE]:
self.showNextHint()
return
def conditionalAction(self, actionName):
action = self.__bootcampGarageActions.getActionByName(actionName)
condition = action['show_condition']['condition']
nationData = self.getNationData()
if condition.checkCondition(nationData) == action['show_condition']['result']:
conditionalAction = action[ACTION_PARAM.CONDITIONAL_ACTION]
callbackAction = self.getCallbackByName(conditionalAction)
callbackAction()
return
else:
nextHint = action[ACTION_PARAM.NEXT_HINT]
callback = self.getCallbackByName(nextHint)
if callback is not None:
callback()
return
def preprocessBottomData(self, data):
ctx = g_bootcamp.getContext()
if data.get('label_format', None) is not None:
if 'bonuses' in ctx:
lessonBonuses = ctx['bonuses']['battle'][self.__lessonId - 1]
labelFormat = data['label_format']
if labelFormat == 'getCredits':
nationId = ctx['nation']
nationsData = lessonBonuses.get('nations', None)
if nationsData is not None:
formatedValue = BigWorld.wg_getIntegralFormat(nationsData[NATION_NAMES[nationId]]['credits']['win'][0])
data['label'] = data['label'].format(formatedValue)
elif labelFormat == 'getExperience':
nationId = ctx['nation']
nationsData = lessonBonuses.get('nations', None)
if nationsData is not None:
formatedValue = BigWorld.wg_getIntegralFormat(nationsData[NATION_NAMES[nationId]]['xp']['win'][0])
data['label'] = data['label'].format(formatedValue)
elif labelFormat == 'getGold':
data['label'] = data['label'].format(lessonBonuses['gold'])
elif labelFormat == 'getPremiumHours':
hours = lessonBonuses['premium']
timeInSeconds = hours * time_utils.ONE_HOUR
if timeInSeconds > time_utils.ONE_DAY:
time = ceil(timeInSeconds / time_utils.ONE_DAY)
timeMetric = i18n.makeString('#menu:header/account/premium/days')
else:
time = ceil(timeInSeconds / time_utils.ONE_HOUR)
timeMetric = i18n.makeString('#menu:header/account/premium/hours')
data['label'] = data['label'].format(str(int(time)) + ' ' + timeMetric)
elif labelFormat == 'getRepairKits':
data['label'] = data['label'].format(lessonBonuses['equipment']['largeRepairkit']['count'])
elif labelFormat == 'getFirstAid':
data['label'] = data['label'].format(lessonBonuses['equipment']['largeMedkit']['count'])
elif labelFormat == 'getFireExtinguisher':
data['label'] = data['label'].format(lessonBonuses['equipment']['handExtinguishers']['count'])
data['label'] = makeString(data['label'])
data['description'] = makeString(data['description'])
data['content_data'] = None
return
def startLobbyAssistance(self):
if self.__lobbyAssistant is None:
from Assistant import LobbyAssistant
hints = {}
if self.bootcampCtrl.getLessonNum() == 1:
hints = {'hintRotateLobby': {'time_completed': 3.0,
'timeout': 1.0,
'angle': 180.0,
'cooldown_after': 0.0,
'message': ''}}
self.__lobbyAssistant = LobbyAssistant(hints)
self.__lobbyAssistant.start()
return
def stopLobbyAssistance(self):
if self.__lobbyAssistant is not None:
self.__lobbyAssistant.stop()
self.__lobbyAssistant = None
return
def updateLobbyLobbySettings(self, elementsList, isHide = False):
if elementsList is not None:
for element in elementsList:
elementHide = getBootcampInternalHideElementName(element)
self.bootcampCtrl.updateLobbySettingsVisibility(elementHide, isHide)
return
def changeGarageGUIElementsVisibility(self, elementsList, isHide, update = True):
lobbySettings = self.bootcampCtrl.getLobbySettings()
self.updateLobbyLobbySettings(elementsList, isHide)
if update:
g_eventBus.handleEvent(events.LoadViewEvent(events.BootcampEvent.SET_VISIBLE_ELEMENTS, None, lobbySettings), EVENT_BUS_SCOPE.LOBBY)
return
def disableGarageGUIElements(self, elementsList):
self.changeGarageGUIElementsVisibility(elementsList, True)
def enableGarageGUIElements(self, elementsList):
self.changeGarageGUIElementsVisibility(elementsList, False)
def showGarageGUIElements(self, elementsList, callback):
self.startLobbyAssistance()
LOG_DEBUG_DEV_BOOTCAMP('showGarageGUIElements', elementsList)
g_eventBus.handleEvent(events.LoadViewEvent(events.BootcampEvent.SHOW_NEW_ELEMENTS, None, {'keys': elementsList,
'callback': callback}), EVENT_BUS_SCOPE.LOBBY)
self.changeGarageGUIElementsVisibility(elementsList, False, update=False)
return
def showNextHint(self):
self.__showHint(self.__nextHint)
def showPrevHint(self):
self.__showHint(self.__prevHint)
def getPrevHint(self):
return self.__prevHint
def hideHint(self):
if self.__prevHint is not None:
LOG_DEBUG_DEV_BOOTCAMP('hideHint - {0}'.format(self.__prevHint))
callback = self.getCallbackByName(self.__prevHint)
self.__prevHint = None
if callback is not None:
callback(True)
return
def hidePrevShowNextHint(self):
self.hideHint()
self.showNextHint()
def suspendLesson(self):
self.isLessonSuspended = True
self.hideAllHints()
def resumeLesson(self):
self.isLessonSuspended = False
def highlightLobbyHint(self, lobbyHint, isShow = True, isForce = False):
newHardcodeHint = lobbyHint if isShow else None
if not self.isLessonSuspended and newHardcodeHint != self.__hardcodeHint or isForce:
if isShow:
self.hideAllHints()
self.__hardcodeHint = newHardcodeHint
actionType = events.BootcampEvent.REMOVE_HIGHLIGHT
if isShow:
actionType = events.BootcampEvent.ADD_HIGHLIGHT
g_eventBus.handleEvent(events.LoadViewEvent(actionType, None, lobbyHint), EVENT_BUS_SCOPE.LOBBY)
return
def setBattleSelectorHintText(self):
if self.__battleSelectorHintPointcutIndex is None:
self.__battleSelectorHintPointcutIndex = g_bootcamp.addPointcut(PointcutBattleSelectorHintText)
g_eventDispatcher.updateUI()
return
def removeBattleSelectorHintText(self):
if self.__battleSelectorHintPointcutIndex is not None:
g_bootcamp.removePointcut(self.__battleSelectorHintPointcutIndex)
self.__battleSelectorHintPointcutIndex = None
g_eventDispatcher.updateUI()
return
def showBootcampGraduateMessage(self):
self.resumeLesson()
self.runCustomAction('msgBootcampGraduate')
def toDefaultAccount(self):
self.clear()
g_bootcampEvents.onRequestBootcampFinish()
def finishBootcamp(self):
Waiting.show('login')
self.clear()
g_bootcampEvents.onGarageLessonFinished(self.__lessonId)
self.toDefaultAccount()
g_bootcampEvents.onEnterPreview -= self.onEnterPreview
g_bootcampEvents.onExitPreview -= self.onExitPreview
def onEnterPreview(self):
self.__isInPreview = True
def onExitPreview(self):
self.__isInPreview = False
self.showPrevHint()
def isInPreview(self):
return self.__isInPreview
def showFinalVideo(self):
g_bootcamp.showFinalVideo()
def getNationData(self):
return g_bootcamp.getNationData()
def getNation(self):
return self.bootcampCtrl.nation
def getSecondVehicleInvId(self):
""" Returns inventory ID of the second vehicle that the player must purchase according to Bootcamp flow.
:return: None if inventory cache is not synced yet,
-1 if there is no second vehicle in inventory,
valid inventory ID otherwise.
"""
return self.__secondVehicleInvID
def isSecondVehicleSelected(self):
invID = self.getSecondVehicleInvId()
if invID is None:
return False
elif invID == -1:
return True
else:
from CurrentVehicle import g_currentVehicle
return invID == g_currentVehicle.invID
@async
@process
def isSecondVehicleSelectedAsync(self, callback):
yield self.__waitForItemCacheSync()
callback(self.isSecondVehicleSelected())
@process
def selectLessonVehicle(self):
selected = yield self.isSecondVehicleSelectedAsync()
        if selected:
            return
        invID = self.getSecondVehicleInvId()
        if invID is None:
            raise AssertionError
        from CurrentVehicle import g_currentVehicle
        g_currentVehicle.selectVehicle(invID)
        return
def checkReturnToHangar(self):
if self.isLessonSuspended:
g_bootcampGarage.highlightLobbyHint('HangarButton', True, True)
elif self.isLessonFinished:
if self.canGoToBattle:
LOG_DEBUG_DEV_BOOTCAMP("checkReturnToHangar - hiding 'HangarButton' highlight (isLessonFinished and canGoToBattle)")
g_bootcampGarage.highlightLobbyHint('HangarButton', False, True)
else:
LOG_DEBUG_DEV_BOOTCAMP("checkReturnToHangar - highlighting 'HangarButton' (isLessonFinished and not canGoToBattle)")
g_bootcampGarage.highlightLobbyHint('HangarButton', True, True)
elif self.__lessonId == g_bootcamp.getContextIntParameter('randomBattleLesson'):
name = 'hideHeaderBattleSelector'
if name in self.bootcampCtrl.getLobbySettings():
if self.bootcampCtrl.getLobbySettings()[name]:
g_bootcampGarage.highlightLobbyHint('HangarButton', True, True)
return
try:
items = battle_selector_items.getItems()
if not items.isSelected('random'):
return
except:
LOG_CURRENT_EXCEPTION_BOOTCAMP()
LOG_ERROR_BOOTCAMP('battle_selector_items exception')
g_bootcampGarage.highlightLobbyHint('HangarButton', True, True)
else:
g_bootcampGarage.highlightLobbyHint('HangarButton', True, True)
@process
def hideExcessElements(self):
selected = yield self.isSecondVehicleSelectedAsync()
if not selected:
excessElements = ['HangarEquipment',
'HangarOptionalDevices',
'HangarQuestControl',
'HeaderBattleSelector']
self.disableGarageGUIElements(excessElements)
g_bootcampEvents.onRequestCloseTechnicalMaintenance()
@process
def checkSecondVehicleHintEnabled(self):
self.hideExcessElements()
selected = yield self.isSecondVehicleSelectedAsync()
if selected:
self.highlightLobbyHint('SecondTank', False, True)
self.enableCheckpointGUI()
self.resumeLesson()
self.runCheckpoint()
self.runViewAlias(VIEW_ALIAS.LOBBY_HANGAR)
else:
g_bootcampEvents.onBattleNotReady()
self.suspendLesson()
self.highlightLobbyHint('SecondTank', True, True)
def getBattleResultsExtra(self, lessonId):
return self.__bootcampGarageActions.getBattleResultsById(lessonId)
def updateNode(self, node):
node.setState(NODE_STATE_FLAGS.UNLOCKED)
def setSecondVehicleNode(self, secondVehicleNode):
self.updateNode(secondVehicleNode)
secondVehicleNode.setState(NODE_STATE.addIfNot(secondVehicleNode.getState(), NODE_STATE_FLAGS.WAS_IN_BATTLE))
def setModuleNode(self, moduleNode):
self.updateNode(moduleNode)
moduleNode.setState(NODE_STATE.addIfNot(moduleNode.getState(), NODE_STATE_FLAGS.IN_INVENTORY))
def initSubscriptions(self):
self.itemsCache.onSyncCompleted += self.__onItemCacheSyncCompleted
def destroySubscriptions(self):
self.itemsCache.onSyncCompleted -= self.__onItemCacheSyncCompleted
def disableResearchButton(self):
g_bootcampEvents.onRequestChangeResearchButtonState(False)
def enableResearchButton(self):
g_bootcampEvents.onRequestChangeResearchButtonState(True)
def disableVehiclePreviewBuyButton(self):
g_bootcampEvents.onRequestChangeVehiclePreviewBuyButtonState(False)
def enableVehiclePreviewBuyButton(self):
g_bootcampEvents.onRequestChangeVehiclePreviewBuyButtonState(True)
def _createMessageContext(self, message, showBottomData):
nationId = self.getNation()
messageNation = message[ACTION_PARAM.NATIONS_DATA][NATION_NAMES[nationId]]
m_ctx = {'messagePreset': BOOTCAMP_MESSAGE_WINDOW[messageNation[ACTION_PARAM.PRESET]],
'label': makeString(messageNation[ACTION_PARAM.LABEL]),
'iconPath': messageNation[ACTION_PARAM.ICON],
'message': ''}
if showBottomData:
m_ctx['message'] = makeString(messageNation[ACTION_PARAM.TEXT])
if messageNation[ACTION_PARAM.BACKGROUND]:
m_ctx[ACTION_PARAM.BACKGROUND] = messageNation[ACTION_PARAM.BACKGROUND]
if messageNation[ACTION_PARAM.BOTTOM_RENDERER] != -1 and showBottomData:
bottomRenderer = MESSAGE_BOTTOM_RENDERER[messageNation[ACTION_PARAM.BOTTOM_RENDERER]]
m_ctx['bottomRenderer'] = bottomRenderer
m_ctx['bottomData'] = []
for bottom in messageNation['bottom']:
processedBottom = dict(bottom)
self.preprocessBottomData(processedBottom)
bottomDataElement = {'label': processedBottom['label'],
'icon': processedBottom['icon'],
'description': processedBottom['description'],
'contentData': processedBottom['content_data'],
'iconTooltip': processedBottom['iconTooltip'],
'labelTooltip': processedBottom['labelTooltip']}
m_ctx['bottomData'].append(bottomDataElement)
return m_ctx
def __showHint(self, hintName):
if hintName is not None:
LOG_DEBUG_DEV_BOOTCAMP('showNextHint - {0}'.format(hintName))
callback = self.getCallbackByName(hintName)
if callback is not None:
callback()
return
@async
def __waitForItemCacheSync(self, callback):
if self.itemsCache.isSynced():
callback(True)
else:
self.__itemCacheSyncCallbacks.append(lambda : callback(self.itemsCache.isSynced()))
def __onItemCacheSyncCompleted(self, *_):
self.__updateSecondVehicleInvID()
callbacks = self.__itemCacheSyncCallbacks
self.__itemCacheSyncCallbacks = []
for callback in callbacks:
callback()
BigWorld.callback(0.01, self.__processDeferredAliases)
def __updateSecondVehicleInvID(self):
if self.itemsCache.isSynced():
nationData = self.getNationData()
vehicleCD = nationData['vehicle_second']
vehicle = self.itemsCache.items.getItemByCD(vehicleCD)
self.__secondVehicleInvID = vehicle.invID
def __processDeferredAliases(self):
for alias in self.__deferredAliases:
self.runViewAlias(alias)
del self.__deferredAliases[:]
g_bootcampGarage = BootcampGarageLesson()
| [
"[email protected]"
] | |
aaf45a9200b30d752b7d7761ba15eabd843892ff | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /PIDCalib/CalibDataScripts/jobs/Stripping5TeV/Lam0/ganga_Lam0Fit_MagUp.py | f823e503e3df060e3276df738366d614877ed200 | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,567 | py | # set the stripping version
stripVersion = "5TeV"
# magnet 'Up' or 'Down'?
magPol='Up'
# file suffix:
#
# dst_k_and_pi: Kaons and pions from D*
# lam0_p: Protons from Lambda0
# jpsi_mu: Muons from J/psi
# dst_k_and_pi_muonUnBiased: 'MuonUnBiased' kaons + pions from D*
# lam0_p_muonUnBiased: 'MuonUnBiased' protons from Lambda0
fileSuffix='lam0_p'
# set the pbs options (e.g. CPU/walltime)
pbsopts = "-l cput=8:00:00,walltime=12:00:00"
# track name (e.g. K, Pi)
trackName="p"
# particle type (e.g. DSt, Lam0, Jpsi)
partType="Lam0"
# the platform to run on
# if this is not set, it will default to the value of CMTCONFIG
platform=''
# the job name (which will be appended with the stripping version, magnet polarity etc)
jobname="Lam0Fit_P"
# is this a test job?
isTest=False
##########################################################################################################
# The following lines should not need to be changed in most cases, as they are autoconfigured from the
# above options
##########################################################################################################
import os
import re
import sys
# set the platform (if not already specified)
if len(platform)==0:
platform=os.getenv('CMTCONFIG')
# get the Urania version from the script path
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
rematch = re.search('.Urania_(?P<ver>v\d+r\d+p?\d?).', abspath)
UraniaVersion=rematch.group('ver')
# uncomment to set the Urania version manually
#UraniaVersion="v1r1"
# get the User_release_area (i.e. top-level CMT directory,
# which defaults to $HOME/cmtuser)
User_release_area = os.getenv('User_release_area')
if not User_release_area:
User_release_area="%s/cmtuser" %os.getenv('HOME')
# uncomment to set the User_release_area manually
#User_release_area="/home/huntp/cmtuser"
# base directory of $CALIBDATASCRIPTSROOT
basedir = '%s/Urania_%s/PIDCalib/CalibDataScripts' %(User_release_area,
UraniaVersion)
# location of the executable
exeFile = '%s/scripts/sh/%sJob_runRange.sh' %(basedir, partType)
# read the configuration script
import imp
gangaJobFuncs=imp.load_source('gangaJobFuncs',
'%s/scripts/python/gangaJobFuncs.py' %basedir)
gangaJobFuncs.updateEnvFromShellScript( ('{bdir}/jobs/Stripping{strp}'
'/configureGangaJobs.sh').format(
bdir=basedir,strp=stripVersion))
jidVar = ''
if magPol=='Down':
jidVar='CALIBDATA_JIDS_DOWN'
elif magPol=='Up':
jidVar='CALIBDATA_JIDS_UP'
else:
raise NameError('Unknown magnet polarity %s' %magPol)
jids_str=os.getenv(jidVar)
if not jids_str:
raise NameError('Environmental variable %s is not set' %jidVar)
jobIDs=[int(jid) for jid in jids_str.split()]
# uncomment to set the input job IDs manually
#jobIDs=[7,9]
# assume the user's ganga directory is the input directory
gangadir='%s/workspace/%s/%s' %(config['Configuration']['gangadir'],
config['Configuration']['user'],
config['Configuration']['repositorytype'])
# uncomment to use a different input directory
#gangadir='$DATADISK/gangadir_calib/workspace/powell/LocalXML'
#gangadir= '/data/lhcb/users/hunt/gangadir_calib/workspace/huntp/LocalXML'
# use the PBS backend and set the CPU/walltime etc.
bck = PBS()
bck.extraopts = pbsopts
if isTest:
bck.queue = 'testing'
# Uncomment to use the local backend
#bck = Local()
subIDString="*"
## configure the jobs
if isTest:
jobname='Test'+jobname
for jid in jobIDs:
# configure the job comment
jobcomment='Input from Job ID %d' %jid
if isTest:
jobcomment='TEST - '+jobcomment
# get the number of chopped tree
nChoppedTrees = gangaJobFuncs.getNumChoppedTrees(gangadir, jid,
fileSuffix)
if isTest:
# run over ~10% of all events,
# and only process one "chopped tree" (index 0)
nChoppedTrees = 1
nSubJobs =len(jobs(jid).subjobs)
subIDString = "{"+",".join([str(s) for s in range(nSubJobs/10)])+"}"
# Make the lists of arguments used by the ArgSplitter
#
# Arguments are:
#
# 1) top-level input directory (usually the ganga repository)
# 2) Urania version
# 3) platform (e.g. 'x86_64-slc6-gcc46-opt')
# 4) track type
# 5) magnet polarity
# 6) stripping version
# 7) index
# 8) file suffix (e.g. 'dst_k_and_pi')
# 9) verbose flag (0 = no verbose info, 1 = verbose info)
    # 10) exit on bad fit flag ( 0 = don't exit, 1 = do exit )
# 11) subjobID string ('*' for all subjobs, '{0,1,2}' for first 3
# subjobs etc.)
argLists = [ [ gangadir,
UraniaVersion,
platform,
trackName,
magPol,
stripVersion,
str(idx),
fileSuffix,
str(int(isTest)),
'1',
subIDString ] for idx in range(nChoppedTrees) ]
splitter = ArgSplitter(args=argLists)
# configure the application
app= Executable(
exe = File(exeFile),
)
j = Job(
name = '{jname}_S{strp}_Mag{pol}_{suf}'.format(jname=jobname,
strp=stripVersion, pol=magPol, suf=fileSuffix),
comment = jobcomment,
outputfiles = ['*.root', '*.eps'] ,
application=app,
backend=bck,
splitter=splitter
)
j.submit()
| [
"[email protected]"
] | |
9a050e8af3a0f33b423d7e500a3c375688e6fc12 | ba91eb5329fd8e69aa9d9fe1e74e2c7b968806c7 | /robocode-python-ls-core/src/robocode_ls_core/unittest_tools/cases_fixture.py | ba40c23945a6b2d060d0511504b35d5688caabc8 | [
"Apache-2.0"
] | permissive | emanlove/robotframework-lsp | aba9deb43ee7fdd3328e08b4d904d6c4ca44e185 | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | refs/heads/master | 2022-12-06T01:04:04.103593 | 2020-08-30T15:56:43 | 2020-08-30T15:56:43 | 292,014,577 | 1 | 0 | NOASSERTION | 2020-09-01T14:05:52 | 2020-09-01T14:05:51 | null | UTF-8 | Python | false | false | 1,570 | py | import os.path
class CasesFixture(object):
def __init__(self, copy_to_dir: str, original_resources_dir: str):
"""
Upon initialization copies the `original_resources_dir` to
`copy_to_dir`.
So, for instance, we may copy the contents from
/my/test/resource
to
/temp/pytest-101/folder with spaces/resource
Subsequent requests to get the path will access it in the
place we copied it to.
Note: it should usually be bound to a session scope so that
the copy isn't done at each call.
"""
from robocode_ls_core.copytree import copytree_dst_exists
copytree_dst_exists(original_resources_dir, copy_to_dir)
self.resources_dir = copy_to_dir
assert os.path.exists(self.resources_dir)
def get_path(self, resources_relative_path: str, must_exist=True) -> str:
"""
Returns a path from the resources dir.
"""
path = os.path.join(self.resources_dir, resources_relative_path)
if must_exist:
assert os.path.exists(path), "%s does not exist." % (path,)
return path
def copy_to(self, case: str, dest_dir: str):
"""
Helper to copy a given path to a given directory.
To be used if a given path should be within another structure or
if its contents should be mutated.
"""
import shutil
src = self.get_path(case, must_exist=True)
shutil.copytree(src, dest_dir)
| [
"[email protected]"
] | |
ded0f7ed8ac286c43c9d29d7d977eca66a5abe6a | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-8113.py | d62d31ef237d64f11e21225d75aaf0a1ade776a6 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
2f0695813aafeda05fd430df7c0449e407cb4e4a | ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7 | /common/recipes-rest/rest-api/files/common_logging.py | cb60bf695450c4cc5c97453d1354d71f6db5c614 | [] | no_license | facebook/openbmc | bef10604ced226288600f55248b7f1be9945aea4 | 32777c66a8410d767eae15baabf71c61a0bef13c | refs/heads/helium | 2023-08-17T03:13:54.729494 | 2023-08-16T23:24:18 | 2023-08-16T23:24:18 | 31,917,712 | 684 | 331 | null | 2023-07-25T21:19:08 | 2015-03-09T19:18:35 | C | UTF-8 | Python | false | false | 3,993 | py | #!/usr/bin/env python3
#
# Copyright 2014-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import datetime
import logging
import os
import sys
from typing import Any, Dict
import json_log_formatter
class OpenBMCJSONFormatter(json_log_formatter.JSONFormatter):
def json_record(
self, message: str, extra: Dict[str, Any], record: logging.LogRecord
) -> Dict[str, Any]:
# for access logs discard the message field
# all information is already included in extra
if record.name != "aiohttp.access":
extra["message"] = message
else:
# reformat access log request time to isoformat
extra["request_time"] = datetime.datetime.strptime(
extra["request_time"][1:-1], "%d/%b/%Y:%H:%M:%S %z"
).isoformat()
# Include loglevel
extra["level"] = record.levelname
if "time" not in extra:
extra["time"] = datetime.datetime.utcnow()
if record.exc_info:
extra["exc_info"] = self.formatException(record.exc_info)
return extra
def mutate_json_record(self, json_record: Dict[str, Any]):
for attr_name, attr in json_record.items():
if isinstance(attr, datetime.datetime):
json_record[attr_name] = attr.isoformat()
return json_record
class JsonSyslogFormatter(OpenBMCJSONFormatter):
def format(self, record) -> str:
return "rest-api: %s" % (super(JsonSyslogFormatter, self).format(record))
ACCESS_LOG_FORMAT = (
'%a %l %u %t "%r" %s %b %Dus "%{identity}o" "%{Referrer}i" "%{User-Agent}i"'
)
def get_logger_config(config):
if os.path.exists("/dev/log"):
rsyslog_config = {
"level": "INFO",
"formatter": "syslog_" + config["logformat"],
"class": "logging.handlers.SysLogHandler",
"address": "/dev/log",
}
else:
rsyslog_config = {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.StreamHandler",
"stream": sys.stdout,
}
LOGGER_CONF = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {"format": "%(message)s"},
"json": {"()": "common_logging.OpenBMCJSONFormatter"},
"syslog_json": {"()": "common_logging.JsonSyslogFormatter"},
"syslog_default": {"format": "rest-api: %(message)s"},
},
"handlers": {
"file": {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.handlers.RotatingFileHandler",
"filename": config["logfile"],
"maxBytes": 1048576,
"backupCount": 3,
"encoding": "utf8",
},
"syslog": rsyslog_config,
"stdout": {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.StreamHandler",
"stream": sys.stdout,
},
},
"loggers": {
"": {
"handlers": [config["loghandler"]],
"level": "DEBUG",
"propagate": True,
}
},
}
return LOGGER_CONF
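# Illustrative usage sketch: applying the generated dict with the standard
# logging machinery. The config values below are hypothetical assumptions;
# actual values come from the REST API's runtime configuration.
if __name__ == "__main__":
    import logging.config
    example_config = {
        "logformat": "json",         # selects the "json"/"syslog_json" formatters above
        "logfile": "/tmp/rest.log",  # hypothetical path for the rotating file handler
        "loghandler": "stdout",      # root handler: "file", "syslog" or "stdout"
    }
    logging.config.dictConfig(get_logger_config(example_config))
    logging.getLogger(__name__).info("logging configured", extra={"component": "example"})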
| [
"[email protected]"
] | |
d9eb34855131358c7270e0fa5ad93d1566736595 | 8f46693b9933763cadb8f9272e6451cb0f7a3e8e | /conductor/devices/blue_pmt/__init__.py | 3a587eb13c200e44055625096de8e262ae4b1103 | [] | no_license | yesrgang/labrad_tools | f4099a2c365400b4501a261855dd122b3845c09f | 7441b2cb00f851b491136a8e0e9a3bf374c132c4 | refs/heads/master | 2018-10-05T11:03:20.386418 | 2018-09-20T22:13:18 | 2018-09-20T22:13:18 | 28,723,788 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | import json
from labrad.wrappers import connectAsync
from time import strftime
from twisted.internet.defer import inlineCallbacks
from conductor_device.conductor_parameter import ConductorParameter
class Recorder(ConductorParameter):
priority = 1
recorders = {
'image': 'record_g',
'image_clock': 'record_eg',
'image_ft': 'record_eg',
}
@inlineCallbacks
def initialize(self):
yield self.connect()
yield self.cxn.yesr10_andor.select_device('ikon')
@inlineCallbacks
def update(self):
recorder_type = ''
sequence = self.conductor.parameters['sequencer']['sequence'].value
for subsequence, recorder in self.recorders.items():
if subsequence in sequence:
recorder_type = recorder
experiment_name = self.conductor.experiment_name
experiment_number = self.conductor.experiment_number
point_number = self.conductor.point_number
if experiment_name is not None:
record_name = '{}#{}-image#{}.hdf5'.format(experiment_name,
experiment_number, point_number)
else:
record_name = 'current-image.hdf5'
record_path = [strftime('%Y%m%d'), record_name]
if self.value is None:
self.value = {}
if recorder_type:
# recorder_type = self.value.get('type', recorder_type)
recorder_config = json.dumps(self.value.get('config', {}))
yield self.cxn.yesr10_andor.record(record_path, recorder_type,
recorder_config)
yield self.conductor.set_parameter_value('andor', 'image_path',
record_path, True)
| [
"[email protected]"
] | |
541985950b3efb0c43cbfe5c764f2684dae8b431 | 990a8f72428655d22775ee9dc2c52b7d6de98e4d | /config.py | 099c5e6c905132b2718e17f60e5c5c6b52cbbc51 | [] | no_license | saulshanabrook/cosc465-iprouter | a2c55906d2ef16e101f9eab7459a3a9cc49de1a0 | a8b08e4929ca13fe34f33c7a433f6cf9670ddf3d | refs/heads/master | 2021-01-18T10:32:59.159476 | 2015-04-15T22:21:58 | 2015-04-15T22:21:58 | 31,486,286 | 0 | 1 | null | 2015-03-01T04:05:49 | 2015-03-01T04:05:49 | null | UTF-8 | Python | false | false | 255 | py | python_interpreter = "python" # the interpreter to use
project = "anaconda" # the name of the project
extra_paths = None # a list of extra paths
port = '19360' # the port to listen on (as string)
| [
"[email protected]"
] | |
25be5db700331b2813ceafbe554648b1b9bac0b5 | 959122eea21cec24a4cf32808a24482feda73863 | /store/admin.py | 2688e5c86b17695b37c5f342258f8b995eb6a74d | [] | no_license | AsadullahFarooqi/InventoryWebApp | 9fbe6ccafcb93bb5cb1879b728954867014d0afd | 07e8e6cb06e11f8ef6ada6a590e52f569a8c2d6b | refs/heads/master | 2020-06-18T15:06:18.612258 | 2019-07-11T07:32:00 | 2019-07-11T07:32:00 | 196,341,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from django.contrib import admin
from .models import (
Store,
StoreEmployers,
Customer,
Supplier,
Products,
# ContainersTypes,
Imported,
Exported,
PaymentsToSuppliers,
PaymentsOfCustomers,
EmployersLedger,
)
# Register your models here.
admin.site.register(Store)
admin.site.register(StoreEmployers)
admin.site.register(Customer)
admin.site.register(Supplier)
admin.site.register(Products)
admin.site.register(Imported)
admin.site.register(Exported)
admin.site.register(EmployersLedger)
# admin.site.register(ContainersTypes)
admin.site.register(PaymentsToSuppliers)
admin.site.register(PaymentsOfCustomers)
| [
"[email protected]"
] | |
2654cbe512ac34c4f9823c1c8c40a7d8514e0217 | ee8cb974f12977894f7f0fda5b8129570224618b | /gim/core/migrations/0030_auto__del_unique_milestone_repository_number.py | c3757774bc525644d74039b596210a07f04dccd0 | [] | no_license | derekey/github-issues-manager | 996b3c7b9acd0362b7d99948d45a15ea05d58cc2 | 63a405b993e77f10b9c2b6d9790aae7576d9d84f | refs/heads/develop | 2021-01-21T01:03:01.739800 | 2014-11-09T21:26:49 | 2014-11-09T21:26:49 | 42,234,954 | 1 | 0 | null | 2015-09-10T09:22:40 | 2015-09-10T09:22:39 | null | UTF-8 | Python | false | false | 33,049 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Milestone', fields ['repository', 'number']
db.delete_unique(u'core_milestone', ['repository_id', 'number'])
def backwards(self, orm):
# Adding unique constraint on 'Milestone', fields ['repository', 'number']
db.create_unique(u'core_milestone', ['repository_id', 'number'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.availablerepository': {
'Meta': {'ordering': "('organization_username', 'repository')", 'unique_together': "(('user', 'repository'),)", 'object_name': 'AvailableRepository'},
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'organization_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'available_repositories_set'", 'to': u"orm['core.GithubUser']"})
},
u'core.commit': {
'Meta': {'ordering': "('committed_at',)", 'unique_together': "(('repository', 'sha'),)", 'object_name': 'Commit'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits_authored'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'author_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'authored_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'committed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'committer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits__commited'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'committer_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'committer_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commits'", 'to': u"orm['core.Repository']"}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'tree': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
u'core.githubuser': {
'Meta': {'ordering': "('username',)", 'object_name': 'GithubUser'},
'available_repositories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Repository']", 'through': u"orm['core.AvailableRepository']", 'symmetrical': 'False'}),
'available_repositories_set_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'available_repositories_set_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_organization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'org_repositories_fetch_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organizations_rel_+'", 'to': u"orm['core.GithubUser']"}),
'organizations_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organizations_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'members'", 'symmetrical': 'False', 'to': u"orm['core.Team']"}),
'teams_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'teams_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'core.issue': {
'Meta': {'unique_together': "(('repository', 'number'),)", 'object_name': 'Issue'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'base_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'base_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'closed_by_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'commits': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Commit']"}),
'commits_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'commits_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'files_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'files_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_pr_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'head_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'head_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_pull_request': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Label']"}),
'mergeable': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'mergeable_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'merged': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'merged_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'merged_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_prs'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues'", 'null': 'True', 'to': u"orm['core.Milestone']"}),
'nb_additions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_changed_files': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_commits': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_deletions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'pr_comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'pr_fetched_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_issues'", 'to': u"orm['core.GithubUser']"})
},
u'core.issuecomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'IssueComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.issueevent': {
'Meta': {'ordering': "('created_at', 'github_id')", 'object_name': 'IssueEvent'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['core.Issue']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues_events'", 'to': u"orm['core.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues_events'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.label': {
'Meta': {'ordering': "('label_type', 'order', 'typed_name')", 'unique_together': "(('repository', 'name'),)", 'object_name': 'Label', 'index_together': "(('repository', 'label_type', 'order'),)"},
'api_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['core.LabelType']"}),
'name': ('django.db.models.fields.TextField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'labels'", 'to': u"orm['core.Repository']"}),
'typed_name': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.labeltype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('repository', 'name'),)", 'object_name': 'LabelType'},
'edit_details': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'regex': ('django.db.models.fields.TextField', [], {}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_types'", 'to': u"orm['core.Repository']"})
},
u'core.milestone': {
'Meta': {'ordering': "('number',)", 'object_name': 'Milestone'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.GithubUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.pullrequestcomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'PullRequestComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'entry_point': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.PullRequestCommentEntryPoint']"}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.pullrequestcommententrypoint': {
'Meta': {'ordering': "('created_at',)", 'unique_together': "(('issue', 'original_commit_sha', 'path', 'original_position'),)", 'object_name': 'PullRequestCommentEntryPoint'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'diff_hunk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Issue']"}),
'original_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'original_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pr_comments_entry_points'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.pullrequestfile': {
'Meta': {'ordering': "('path',)", 'unique_together': "(('tree', 'sha', 'path'),)", 'object_name': 'PullRequestFile'},
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['core.Issue']"}),
'nb_additions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_changes': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_deletions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'patch': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_files'", 'to': u"orm['core.Repository']"}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'tree': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
u'core.repository': {
'Meta': {'ordering': "('owner', 'name')", 'unique_together': "(('owner', 'name'),)", 'object_name': 'Repository'},
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'repositories'", 'symmetrical': 'False', 'to': u"orm['core.GithubUser']"}),
'collaborators_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'collaborators_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_fetch_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'has_issues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hook_set': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hooks_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'hooks_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_fork': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'issues_events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'milestones_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_repositories'", 'to': u"orm['core.GithubUser']"}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prs_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'prs_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'prs_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'core.team': {
'Meta': {'ordering': "('name',)", 'object_name': 'Team'},
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'org_teams'", 'to': u"orm['core.GithubUser']"}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'repositories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': u"orm['core.Repository']"}),
'repositories_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'repositories_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['core'] | [
"[email protected]"
] | |
35547dfbf0da2184a2d66b93a10e753d236b05d0 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/MR_FRN.py | 7e2641a53a7dc52193f36d0754f261e9d80fedfb | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,672 | py | '''
Purpose :[Market Risk feed files],[Added section for generic instruments and updated StrCfPayFctrVAL and StrCfPayVAL]
Department and Desk :[IT],[Market Risk]
Requester: :[Natalie Austin],[Susan Kruger]
Developer :[Douglas Finkel],[Willie van der Bank]
CR Number :[264536,445783, 644358],[790080,796426 07/10/2011,14/10/2011]
2015-09-07 :Chris Human http://abcap-jira/browse/MINT-362
Description :Remove time zero cashflows from structured instruments
Date CR Requestor Developer Change
----------------------------------------------------------------------------------------
2020-09-11 CHG0128302 Garth Saunders Heinrich Cronje https://absa.atlassian.net/browse/CMRI-776
'''
import ael, string, acm, PositionFile, MR_MainFunctions, UserDict
class GenericCF(UserDict.UserDict):
    '''
    Dictionary that mirrors every item as an attribute: cf['pay_day'] = d also
    sets cf.pay_day. This lets the hand-built generic cashflows below be read
    with the same attribute access as the ael cashflow objects used elsewhere
    in Write().
    '''
def __init__(self):
UserDict.UserDict.__init__(self)
def __setitem__(self, key, val):
UserDict.UserDict.__setitem__(self, key, val)
setattr(self, key, val)
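# Minimal illustration of GenericCF (values are made up, not from any trade):
# items assigned with [] are also readable as attributes, which is how the
# generic-FRN branch of Write() below builds and then reads its cashflows.
#   cf = GenericCF()
#   cf['type'] = 'Generic'
#   cf.type == 'Generic' and cf['type'] == 'Generic'   # both True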
def GetForwardPrice(instrument, date):
''' Return the forward price for an instrument on the given date.
'''
calcSpace = acm.Calculations().CreateStandardCalculationsSpaceCollection()
forwardPrice = instrument.Calculation().ForwardPrice(calcSpace, date).Number()
return forwardPrice
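# Usage note (illustrative): the generic-FRN branch of Write() below prices the
# floating reset of each synthetic cashflow with this helper, e.g.
#   GetForwardPrice(ins.Legs()[0].FloatRateReference(), cf.start_day) / 100
# to obtain the forward fixing as a decimal rate.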
InsL = []
# OPENFILE ##########################################################################################################
def OpenFile(temp,FileDir,Filename,PositionName,*rest):
filename = FileDir + Filename
PositionFilename = FileDir + PositionName
outfile = open(filename, 'w')
outfileP = open(PositionFilename, 'w')
outfile.close()
outfileP.close()
del InsL[:]
InsL[:] = []
return filename
# OPENFILE ##########################################################################################################
# WRITE - FILE ######################################################################################################
def Write(i,FileDir,Filename,PositionName,*rest):
filename = FileDir + Filename
PositionFilename = FileDir + PositionName
ins = acm.FInstrument[i.insaddr]
# trade = acm.FTrade[t.trdnbr]
if (i.insaddr) not in InsL:
InsL.append(i.insaddr)
outfile = open(filename, 'a')
#Base record
BASFLAG = 'BAS'
HeaderName = 'Floating Rate Note'
OBJECT = 'Structured InstrumentSPEC'
TYPE = 'Structured Instrument'
NAME = MR_MainFunctions.NameFix(i.insid)
IDENTIFIER = 'insaddr_'+str(i.insaddr)
CurrencyCAL = ''
CurrencyDAYC = ''
CurrencyPERD = ''
CurrencyUNIT = i.curr.insid
try:
if ins.MappedDiscountLink().Link().YieldCurveComponent().Issuer() and ins.MappedDiscountLink().Link().YieldCurveComponent().RecordType() == 'YCAttribute':
DiscountCurveXREF = ins.MappedDiscountLink().Link().UnderlyingComponent().AsString().rsplit(',')[0].lstrip("'").rstrip("'")
CrdtSprdCurveXREF = MR_MainFunctions.NameFix(ins.MappedDiscountLink().Link().YieldCurveComponent().Issuer().Name()) + '_' + i.curr.insid + '_SpreadCurve'
else:
DiscountCurveXREF = MR_MainFunctions.NameFix(ins.MappedDiscountLink().Link().AsString().rsplit(',')[0].lstrip("'").rstrip("'"))
CrdtSprdCurveXREF = ''
except:
DiscountCurveXREF = MR_MainFunctions.NameFix(ins.MappedDiscountLink().Link().AsString().rsplit(',')[0].lstrip("'").rstrip("'"))
CrdtSprdCurveXREF = ''
try:
yc = ins.MappedDiscountLink().Link().YieldCurveComponent().Curve()
if yc.Type() == 'Instrument Spread' and yc.RiskType() == 'Interest Rate':
DiscountCurveXREF = ins.Name() + '_Curve'
        except:
            # Curve lookup failed; keep the DiscountCurveXREF derived above.
            pass
InitialIndxLvlFUNC = ''
InitialIndxLvlUNIT = ''
InitialIndxLvlVAL = '0'
InitialIndxLvlSTRG = ''
TheoModelXREF = 'General Structured Instrument'
MarketModelXREF = ''
FairValueModelXREF = ''
outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n'%(BASFLAG, HeaderName, OBJECT, TYPE, NAME, IDENTIFIER, CurrencyCAL, CurrencyDAYC, CurrencyPERD, CurrencyUNIT, DiscountCurveXREF, CrdtSprdCurveXREF, InitialIndxLvlFUNC, InitialIndxLvlUNIT, InitialIndxLvlVAL, InitialIndxLvlSTRG, TheoModelXREF, MarketModelXREF, FairValueModelXREF))
#Rollover record
BASFLAG = 'rm_ro'
HeaderName = 'Floating Rate Note : Structured CashFlow'
ATTRIBUTE = 'Structured CashFlow'
OBJECT = 'Structured InstrumentSPEC'
Legs = i.legs()
for l in Legs:
StrCfCapMatDATE = ''
StrCfCoupFctrNB = ''
StrCfCoupRateCAL = ''
StrCfCoupRateDAYC = ''
StrCfCoupRatePERD = ''
StrCfCoupRateVAL = ''
StrCfCrvIndexXREF = ''
StrCfCurRateVAL = ''
StrCfDiscENUM = ''
StrCfFwdEndDATE = ''
StrCfFwdStartDATE = ''
StrCfHidOddCpFLAG = ''
StrCfInstrFctrVAL = ''
StrCfInstrSprdVAL = ''
StrCfPayCAL = ''
StrCfPayDATE = ''
StrCfPayDAYC = ''
StrCfPayFctrVAL = ''
StrCfPayPERD = ''
StrCfPayTYPE = ''
StrCfPayUNIT = ''
StrCfPayVAL = ''
StrCfProcXREF = ''
StrCfProratedFLAG = ''
StrCfRealEndDATE = ''
StrCfRealStartDATE = ''
StrCfRstDateDATE = ''
StrCfTheoEndDATE = ''
StrCfTheoStartDATE = ''
CFS = []
if ins.Generic(): #Build cash flows for generic FRNs
cf = GenericCF()
cal = ins.Legs()[0].PayCalendar().Name()
Start = ael.date_today().add_banking_day(ael.Calendar[cal], ins.SpotBankingDaysOffset())
if ins.ExpiryPeriod_unit() == 'Years':
End = acm.Time.DateAddDelta(ael.date(Start), ins.ExpiryPeriod_count(), 0, 0)
elif (ins.ExpiryPeriod_unit() == 'Days' and ins.ExpiryPeriod_count() == 365):
End = acm.Time.DateAddDelta(ael.date(Start), 1, 0, 0)
elif ins.ExpiryPeriod_unit() == 'Days':
End = acm.Time.DateAddDelta(ael.date(Start), 0, 0, ins.ExpiryPeriod_count())
BusEnd = ael.date(End).adjust_to_banking_day(ael.Calendar[cal], ins.Legs()[0].PayDayMethod())
if ins.Legs()[0].RollingPeriodUnit() == 'Days':
startday = ael.date(acm.Time.DateAddDelta(ael.date(End), 0, 0, -1)).adjust_to_banking_day(ael.Calendar[cal], ins.Legs()[0].PayDayMethod())
if ins.Legs()[0].RollingPeriodUnit() == 'Months':
startday = ael.date(acm.Time.DateAddDelta(ael.date(End), 0, -3, 0)).adjust_to_banking_day(ael.Calendar[cal], ins.Legs()[0].PayDayMethod())
payday = BusEnd
cf = GenericCF()
cf['pay_day'] = payday
cf['end_day'] = payday
cf['start_day'] = startday
cf['type'] = 'Generic'
CFS.append(cf)
cf = GenericCF()
cf['type'] = 'Fixed Amount'
cf['pay_day'] = payday
cf['end_day'] = ''
cf['start_day'] = ''
CFS.append(cf)
if ins.Legs()[0].RollingPeriodUnit() == 'Months':
ind = 1
while ind <= 3:
payday = ael.date(acm.Time.DateAddDelta(ael.date(End), 0, -(ind*3), 0)).adjust_to_banking_day(ael.Calendar[cal], ins.Legs()[0].PayDayMethod())
startday = ael.date(acm.Time.DateAddDelta(ael.date(End), 0, -(ind*3+3), 0)).adjust_to_banking_day(ael.Calendar[cal], ins.Legs()[0].PayDayMethod())
cf = GenericCF()
cf['pay_day'] = payday
cf['end_day'] = payday
cf['start_day'] = startday
cf['type'] = 'Generic'
CFS.append(cf)
ind = ind + 1
else:
CFS = l.cash_flows()
#for cf in l.cash_flows():
for cf in CFS:
StrCfCapMatDATE = ''
StrCfCoupFctrNB = ''
StrCfCoupRateCAL = ''
StrCfCoupRateDAYC = ''
StrCfCoupRatePERD = ''
StrCfCoupRateVAL = ''
StrCfCrvIndexXREF = ''
StrCfCurRateVAL = ''
StrCfDiscENUM = ''
StrCfFwdEndDATE = ''
StrCfFwdStartDATE = ''
StrCfHidOddCpFLAG = ''
StrCfInstrFctrVAL = ''
StrCfInstrSprdVAL = ''
StrCfPayCAL = ''
StrCfPayDATE = ''
StrCfPayDAYC = ''
StrCfPayFctrVAL = ''
StrCfPayPERD = ''
StrCfPayTYPE = ''
StrCfPayUNIT = ''
StrCfPayVAL = ''
StrCfProcXREF = ''
StrCfProratedFLAG = ''
StrCfRealEndDATE = ''
StrCfRealStartDATE = ''
StrCfRstDateDATE = ''
StrCfTheoEndDATE = ''
StrCfTheoStartDATE = ''
# calc = cf.Calculation()
if MR_MainFunctions.Datefix(cf.pay_day) > MR_MainFunctions.Datefix(ael.date_today()) and MR_MainFunctions.Datefix(cf.pay_day) != '':
if cf.type == 'Fixed Amount':
StrCfPayDATE = MR_MainFunctions.Datefix(cf.pay_day)
StrCfPayTYPE = 'Notional'
StrCfPayUNIT = l.curr.insid
if not ins.Generic():
StrCfPayVAL = cf.nominal_amount() # PayRecSign*i.contr_size*cf.nominal_factor
else:
StrCfPayVAL = i.contr_size
StrCfCrvIndexXREF = ''
#StrCfCapMatDATE = MR_MainFunctions.Datefix(str(i.exp_day))
#StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
elif cf.type == 'Float Rate':
try:
for reset in cf.resets():
StrCfCapMatDATE = MR_MainFunctions.Datefix(str(i.exp_day))
StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfCoupFctrNB = '0'
StrCfCoupRateDAYC = MR_MainFunctions.DayCountFix(l.daycount_method)
StrCfCoupRatePERD = 'simple'
StrCfCoupRateVAL = str(l.spread)
StrCfCrvIndexXREF = ''
float_rate = getattr(l, 'float_rate')
if float_rate:
output = float_rate.insid
# Add compounding convention
cfspread = getattr(l, 'spread')
rtype = getattr(l, 'reset_type')
lrp = getattr(l, 'reset_period')
comp = str(MR_MainFunctions.CompoundConvention(cfspread, rtype, lrp))
StrCfCrvIndexXREF = 'SCI_' + output + '_' + str(getattr(l, 'reset_type')) + '_' + str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit')) + '_' + str(l.reset_day_offset) + str(l.reset_day_method) + comp
else:
StrCfCrvIndexXREF = ''
#StrCfCrvIndexXREF = 'StrCfCrvIndexXREF'
StrCfCurRateVAL = str(reset.value / 100)
StrCfDiscENUM = 'In Fine'
StrCfFwdEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
#StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(reset.day))
StrCfHidOddCpFLAG = 'FALSE'
StrCfInstrFctrVAL = '1'
StrCfInstrSprdVAL = '0'
StrCfPayDATE = MR_MainFunctions.Datefix(str(cf.pay_day))
StrCfPayTYPE = 'Float'
StrCfPayUNIT = l.curr.insid
StrCfPayFctrVAL = cf.float_rate_factor
StrCfPayVAL = cf.nominal_amount() # calc.Nominal(CalcSpace(cs), Trade).Number()
StrCfProratedFLAG = 'TRUE'
StrCfRealEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
StrCfRealStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfRstDateDATE = MR_MainFunctions.Datefix(str(cf.start_day))
except:
for reset in cf.resets():
StrCfCapMatDATE = MR_MainFunctions.Datefix(str(i.exp_day))
StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfCoupFctrNB = '0'
StrCfCoupRateDAYC = MR_MainFunctions.DayCountFix(l.daycount_method)
StrCfCoupRatePERD = 'simple'
StrCfCoupRateVAL = str(l.spread)
StrCfCrvIndexXREF = ''
float_rate = getattr(l, 'float_rate')
if float_rate:
output = float_rate.insid
# Add compounding convention
cfspread = getattr(l, 'spread')
rtype = getattr(l, 'reset_type')
lrp = getattr(l, 'reset_period')
comp = str(MR_MainFunctions.CompoundConvention(cfspread, rtype, lrp))
StrCfCrvIndexXREF = 'SCI_' + output + '_' + str(getattr(l, 'reset_type')) + '_' + str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit')) + '_' + str(l.reset_day_offset) + str(l.reset_day_method) + comp
else:
StrCfCrvIndexXREF = ''
# StrCfCrvIndexXREF = 'StrCfCrvIndexXREF'
StrCfCurRateVAL = str(reset.value / 100)
StrCfDiscENUM = 'In Fine'
StrCfFwdEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
#StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(reset.day))
StrCfHidOddCpFLAG = 'FALSE'
StrCfInstrFctrVAL = '1'
StrCfInstrSprdVAL = '0'
StrCfPayDATE = MR_MainFunctions.Datefix(str(cf.pay_day))
StrCfPayTYPE = 'Float'
StrCfPayUNIT = l.curr.insid
StrCfPayVAL = cf.nominal_amount() # PayRecSign*i.contr_size*cf.nominal_factor # calc.Nominal(CalcSpace(cs), Trade).Number()
StrCfProratedFLAG = 'TRUE'
StrCfRealEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
StrCfRealStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfRstDateDATE = MR_MainFunctions.Datefix(str(cf.start_day))
elif cf.type == 'Generic':
StrCfCapMatDATE = MR_MainFunctions.Datefix(str(BusEnd))
StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfCoupFctrNB = '0'
StrCfCoupRateDAYC = MR_MainFunctions.DayCountFix(l.daycount_method)
StrCfCoupRatePERD = 'simple'
StrCfCoupRateVAL = str(l.spread)
StrCfCrvIndexXREF = ''
float_rate = getattr(l, 'float_rate')
if float_rate:
output = float_rate.insid
# Add compounding convention
cfspread = getattr(l, 'spread')
rtype = getattr(l, 'reset_type')
lrp = getattr(l, 'reset_period')
comp = str(MR_MainFunctions.CompoundConvention(cfspread, rtype, lrp))
StrCfCrvIndexXREF = 'SCI_' + output + '_' + str(getattr(l, 'reset_type')) + '_' + str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit')) + '_' + str(l.reset_day_offset) + str(l.reset_day_method) + comp
else:
StrCfCrvIndexXREF = ''
# StrCfCrvIndexXREF = 'StrCfCrvIndexXREF'
#StrCfCurRateVAL = str(reset.value / 100)
StrCfCurRateVAL = str(GetForwardPrice(ins.Legs()[0].FloatRateReference(), cf.start_day) / 100)
StrCfDiscENUM = 'In Fine'
StrCfFwdEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
#StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(reset.day))
StrCfHidOddCpFLAG = 'FALSE'
StrCfInstrFctrVAL = '1'
StrCfInstrSprdVAL = '0'
StrCfPayDATE = MR_MainFunctions.Datefix(str(cf.pay_day))
StrCfPayTYPE = 'Float'
StrCfPayUNIT = l.curr.insid
StrCfPayVAL = i.contr_size # PayRecSign*i.contr_size*cf.nominal_factor # calc.Nominal(CalcSpace(cs), Trade).Number()
StrCfProratedFLAG = 'TRUE'
StrCfRealEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
StrCfRealStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfRstDateDATE = MR_MainFunctions.Datefix(str(cf.start_day))
else:
StrCfCapMatDATE = MR_MainFunctions.Datefix(str(i.exp_day))
StrCfFwdStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
StrCfCoupFctrNB = '0'
StrCfCoupRateCAL = ''
StrCfCoupRateDAYC = MR_MainFunctions.DayCountFix(l.daycount_method)
StrCfCoupRatePERD = 'simple'
StrCfCoupRateVAL = str(cf.rate)
StrCfHidOddCpFLAG = 'FALSE'
StrCfPayDATE = MR_MainFunctions.Datefix(str(cf.pay_day))
StrCfPayTYPE = 'Fixed'
StrCfPayUNIT = l.curr.insid
StrCfPayVAL = cf.nominal_amount() # PayRecSign*i.contr_size*cf.nominal_factor
StrCfProratedFLAG = 'TRUE'
StrCfRealEndDATE = MR_MainFunctions.Datefix(str(cf.end_day))
StrCfRealStartDATE = MR_MainFunctions.Datefix(str(cf.start_day))
outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n'%(BASFLAG, HeaderName, ATTRIBUTE, OBJECT, StrCfCapMatDATE, StrCfCoupFctrNB, StrCfCoupRateCAL, StrCfCoupRateDAYC, StrCfCoupRatePERD, StrCfCoupRateVAL, StrCfCrvIndexXREF, StrCfCurRateVAL, StrCfDiscENUM, StrCfFwdEndDATE, StrCfFwdStartDATE, StrCfHidOddCpFLAG, StrCfInstrFctrVAL, StrCfInstrSprdVAL, StrCfPayCAL, StrCfPayDATE, StrCfPayDAYC, StrCfPayFctrVAL, StrCfPayPERD, StrCfPayTYPE, StrCfPayUNIT, StrCfPayVAL, StrCfProcXREF, StrCfProratedFLAG, StrCfRealEndDATE, StrCfRealStartDATE, StrCfRstDateDATE, StrCfTheoEndDATE, StrCfTheoStartDATE))
outfile.close()
#Position
for trades in i.trades():
if MR_MainFunctions.ValidTradeNo(trades) == 0:
if MR_MainFunctions.IsExcludedPortfolio(trades) == False:
if trades.category != 'Collateral': #Collateral trades must be excluded from market risk calculations
PositionFile.CreatePosition(trades, PositionFilename)
return i.insid
# WRITE - FILE ######################################################################################################
| [
"[email protected]"
] | |
e5b2ae4c4440478fd5c24f1011f898ba4711d6d7 | 153da69b35f032f5b83a06f17008ba41a1b336b4 | /src/main/hspylib/modules/mock/mock_request.py | 4742062ecc57e21e043658fa728a7644c1327e85 | [
"MIT"
] | permissive | TrendingTechnology/hspylib | 6400cadf9dfe6ab5733712dcfeccf8022d61c589 | c79a2c17e89fe21d00ccd9c1646a03407cd61839 | refs/heads/master | 2023-06-20T15:47:35.962661 | 2021-07-19T22:12:18 | 2021-07-19T23:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
   Mock HTTP response objects used to stub out fetch responses in tests.
@project: HSPyLib
hspylib.main.hspylib.modules.mock
@file: mock_request.py
@created: Tue, 4 May 2021
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior"
@site: https://github.com/yorevs/hspylib
@license: MIT - Please refer to <https://opensource.org/licenses/MIT>
Copyright 2021, HSPyLib team
"""
from typing import Any
from requests.structures import CaseInsensitiveDict
from hspylib.core.enums.charset import Charset
from hspylib.core.enums.content_type import ContentType
from hspylib.core.enums.http_code import HttpCode
from hspylib.core.enums.http_method import HttpMethod
from hspylib.modules.fetch.http_response import HttpResponse
class MockResponse(HttpResponse):
"""TODO"""
def __init__(self,
parent,
method: HttpMethod,
url: str,
status_code: HttpCode = None,
body: str = None,
headers=None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON):
super().__init__(method, url, status_code, body, headers, encoding, content_type)
self.parent = parent
self.received_body = False
def then_return(
self,
code: HttpCode,
body: str = None,
headers=None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON) -> Any:
"""TODO"""
response = self.parent.mock(self.method, self.url)
response.status_code = code
response.body = body
response.headers = headers if headers else []
response.encoding = encoding
response.content_type = content_type
if response.content_type:
response.content_type.charset = encoding
return self.parent
def then_return_with_received_body(
self,
code: HttpCode,
headers: CaseInsensitiveDict = None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON) -> Any:
"""TODO"""
response = self.parent.mock(self.method, self.url)
response.received_body = True
response.body = None
response.status_code = code
response.headers = headers if headers else []
response.encoding = encoding
response.content_type = content_type
if response.content_type:
response.content_type.charset = encoding
return self.parent
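# Rough usage sketch (the 'mocker' parent object is assumed, not defined in this
# module; it is expected to expose mock(method, url) returning a MockResponse):
#   MockResponse(mocker, HttpMethod.GET, '/users').then_return(HttpCode.OK, body='[]')
# Both then_return* methods configure the response registered on the parent and
# return the parent itself so further expectations can be chained.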
| [
"[email protected]"
] | |
8309f762bf07a0defed03dbce33a49f1d8a33ac1 | 3f1fb9704f76f0fa29723267595be1cc68a55248 | /Alignment/OfflineValidation/test/PrimaryVertexResolution_templ_cfg.py | 8c2c84451bec65c0380fa4d278e51e02e60b8eaa | [
"Apache-2.0"
] | permissive | jeongsumin/cmssw | 639838651cbaf007a4c3d0df6fa33f705326c95d | 54acaec3dc59abda01c018920077db98db976746 | refs/heads/master | 2021-07-05T15:32:01.271717 | 2020-10-15T04:31:06 | 2020-10-15T04:31:06 | 189,147,548 | 0 | 0 | Apache-2.0 | 2019-05-29T03:57:00 | 2019-05-29T03:56:59 | null | UTF-8 | Python | false | false | 7,514 | py | #! /bin/env cmsRun
'''
cfg to produce pv resolution plots
here doing refit of tracks and vertices using latest alignment
'''
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from fnmatch import fnmatch
import FWCore.ParameterSet.VarParsing as VarParsing
from pdb import set_trace
process = cms.Process("PrimaryVertexResolution")
###################################################################
def best_match(rcd):
###################################################################
'''
find out where to best match the input conditions
'''
print(rcd)
for pattern, string in connection_map:
print(pattern, fnmatch(rcd, pattern))
if fnmatch(rcd, pattern):
return string
options = VarParsing.VarParsing("analysis")
options.register('lumi',
1.,
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.float, # string, int, or float
"luminosity used")
options.register ('outputRootFile',
"pvresolution_YYY_KEY_YYY_XXX_RUN_XXX.root",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"output root file")
options.register ('records',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:tag names to be used/changed from GT")
options.register ('external',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:fle.db picks the following record from this external file")
options.register ('GlobalTag',
'110X_dataRun3_Prompt_v3',
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"Global Tag to be used")
options.parseArguments()
print("conditionGT : ", options.GlobalTag)
print("conditionOverwrite: ", options.records)
print("external conditions:", options.external)
print("outputFile : ", options.outputRootFile)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr = cms.untracked.PSet(placeholder = cms.untracked.bool(True))
process.MessageLogger.cout = cms.untracked.PSet(INFO = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1000) # every 100th only
# limit = cms.untracked.int32(10) # or limit to 10 printouts...
))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(150000) )
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration/StandardSequences/Services_cff')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(XXX_FILES_XXX)
)
###################################################################
# Tell the program where to find the conditons
connection_map = [
('Tracker*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiPixel*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiStrip*', 'frontier://PromptProd/CMS_CONDITIONS'),
('Beam*', 'frontier://PromptProd/CMS_CONDITIONS'),
]
if options.external:
connection_map.extend(
(i.split(':')[0], 'sqlite_file:%s' % i.split(':')[1]) for i in options.external
)
connection_map.sort(key=lambda x: -1*len(x[0]))
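# Longer (more specific) patterns are tried first by best_match(), so an exact
# record name added through the 'external' option takes precedence over the
# 'Tracker*' / 'SiPixel*' wildcard entries that point at the frontier service.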
###################################################################
# create the map for the GT toGet
records = []
if options.records:
for record in options.records:
rcd, tag = tuple(record.split(':'))
records.append(
cms.PSet(
record = cms.string(rcd),
tag = cms.string(tag),
connect = cms.string(best_match(rcd))
)
)
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.GlobalTag.globaltag = options.GlobalTag
#process.GlobalTag.DumpStat = cms.untracked.bool(True)
process.GlobalTag.toGet = cms.VPSet(*records)
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
# remove the following lines if you run on RECO files
process.TrackRefitter.src = 'ALCARECOTkAlMinBias'
process.TrackRefitter.NavigationSchool = ''
## PV refit
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices
process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone()
process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("TrackRefitter")
process.offlinePrimaryVerticesFromRefittedTrks.vertexCollections.maxDistanceToBeam = 1
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxNormalizedChi2 = 20
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minSiliconLayersWithHits = 5
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Significance = 5.0
# as it was prior to https://github.com/cms-sw/cmssw/commit/c8462ae4313b6be3bbce36e45373aa6e87253c59
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Error = 1.0
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxDzError = 1.0
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minPixelLayersWithHits = 2
process.PrimaryVertexResolution = cms.EDAnalyzer('SplitVertexResolution',
storeNtuple = cms.bool(False),
intLumi = cms.untracked.double(options.lumi),
vtxCollection = cms.InputTag("offlinePrimaryVerticesFromRefittedTrks"),
trackCollection = cms.InputTag("TrackRefitter"),
minVertexNdf = cms.untracked.double(10.),
minVertexMeanWeight = cms.untracked.double(0.5),
runControl = cms.untracked.bool(True),
runControlNumber = cms.untracked.vuint32(int(XXX_RUN_XXX))
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string(options.outputRootFile),
closeFileFast = cms.untracked.bool(False)
)
process.p = cms.Path(process.offlineBeamSpot +
process.TrackRefitter +
process.offlinePrimaryVerticesFromRefittedTrks +
process.PrimaryVertexResolution)
| [
"[email protected]"
] | |
d7c88e4ca86943f69f81f3921df26bdc568e03f7 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2017/shared_code/central_comp/nonfatal/dismod/cascade/varnish.py | 12362e5c319099abfeea8ab6a7ed8f6bbac1abc4 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,164 | py | import sys
import os
import subprocess
import logging
import upload
import fit_stats
import drill
from db_tools.ezfuncs import query
from cascade_ode.demographics import Demographics
from cascade_ode import importer
from cascade_ode import __version__
from setup_logger import setup_logger
from jobmon import sge
from save_results._save_results import DismodSaveResults
# Set default file mask to readable-for all users
os.umask(0o0002)
AGE_GROUP_SET_ID = 12
def main():
'''Set commit hash, upload model, try to write effects_plots pdfs,
aggregate model version draws up location hierarchy
'''
setup_logger()
mvid = sys.argv[1]
log = logging.getLogger(__name__)
log.info("Varnish started for mvid {}".format(mvid))
try:
try:
commit_hash = sge.get_commit_hash(dir='%s/..' % drill.this_path)
except subprocess.CalledProcessError:
# in site-packages, not git repo
commit_hash = __version__
upload.set_commit_hash(mvid, commit_hash)
upload.upload_model(mvid)
outdir = "%s/%s/full" % (
drill.settings['cascade_ode_out_dir'],
str(mvid))
joutdir = "%s/%s" % (drill.settings['diag_out_dir'], mvid)
fit_stats.write_fit_stats(mvid, outdir, joutdir)
upload.upload_fit_stat(mvid)
# Write effect PDFs
plotter = "{}/effect_plots.r".format(drill.this_path)
plotter = os.path.realpath(plotter)
demo = Demographics()
try:
subprocess.check_output([
"FILEPATH",
plotter,
str(mvid),
joutdir,
drill.settings['cascade_ode_out_dir'],
str(max(demo.year_ids))],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
log.exception("Error in effect plots")
# Launch final aggregations
log.info("Starting Save Results")
aggregate_model(mvid, demo=demo)
except Exception:
log.exception("Error in varnish")
raise
def aggregate_model(mvid, demo):
'''call save_results to create location aggregates,
upload summaries to epi.model_estimate_final,
mark model as finished'''
agg_args = get_aggregation_arguments(mvid, demo)
dsr = DismodSaveResults(
input_dir=agg_args['input_dir'],
input_file_pattern=agg_args['input_file_pattern'],
model_version_id=mvid,
modelable_entity_id=agg_args['modelable_entity_id'],
description=agg_args['description'],
year_id=agg_args['year_id'],
sex_id=agg_args['sex_id'],
measure_id=agg_args['measure_id'],
db_env=agg_args['db_env'],
gbd_round_id=agg_args['gbd_round_id'],
birth_prevalence=agg_args['birth_prevalence'])
dsr.run()
return dsr
def get_aggregation_arguments(mvid, demo):
casc = drill.Cascade(
mvid, root_dir=drill.settings['cascade_ode_out_dir'],
reimport=False)
mvm = casc.model_version_meta
db_env = drill.settings['env_variables']['ENVIRONMENT_NAME']
agg_args = {}
agg_args['input_dir'] = os.path.join(casc.root_dir, 'draws')
agg_args['input_file_pattern'] = '{location_id}_{year_id}_{sex_id}.h5'
agg_args['modelable_entity_id'] = mvm.modelable_entity_id.item()
agg_args['description'] = mvm.description.item()
agg_args['year_id'] = demo.year_ids
agg_args['sex_id'] = demo.sex_ids
agg_args['measure_id'] = get_measures_from_casc(casc)
agg_args['db_env'] = db_env
agg_args['gbd_round_id'] = demo.gbd_round_id
agg_args['birth_prevalence'] = mvm.birth_prev.fillna(0).replace(
{0: False, 1: True}).item()
return agg_args
def get_measures_from_casc(casc):
measure_only = casc.model_version_meta.measure_only
if measure_only.notnull().all():
return measure_only.item()
q = "select measure_id from shared.measure where measure in ('{}')".format(
"', '".join(importer.integrand_pred))
df = query(q, conn_def="epi")
return sorted(df.measure_id.tolist())
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
99cde40683919de208741a0fa26e32d59716076f | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/ca12b50208d34a91a0c096b495e0335f.py | 3379fdc004517ed04596ff5a900c04804f7681e2 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 126 | py | def is_leap_year(year):
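    # Gregorian rule: divisible by 4 and not by 100, unless also divisible by 400.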
if (year%4 == 0 and year%100 !=0) or (year%4 == 0 and year%400==0):
return True
return False
| [
"[email protected]"
] | |
e5e2e59c9ab3d23f95baa947c2fa5b9cadd2f20c | d7016f69993570a1c55974582cda899ff70907ec | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01_preview/models/_network_management_client_enums.py | 928973d87b1432a5f248e44814aab212aca2c8e4 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 5,160 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class AccessRuleDirection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Direction that specifies whether the access rules is inbound/outbound."""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class AddressPrefixType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Address prefix type."""
IP_PREFIX = "IPPrefix"
SERVICE_TAG = "ServiceTag"
class AdminRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class AssociationAccessMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Access mode on the association."""
LEARNING = "Learning"
ENFORCED = "Enforced"
AUDIT = "Audit"
class ConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Configuration Deployment Type."""
SECURITY_ADMIN = "SecurityAdmin"
SECURITY_USER = "SecurityUser"
CONNECTIVITY = "Connectivity"
class ConnectivityTopology(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Connectivity topology type."""
HUB_AND_SPOKE = "HubAndSpoke"
MESH = "Mesh"
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of identity that created the resource."""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class DeleteExistingNSGs(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to delete existing network security groups."""
FALSE = "False"
TRUE = "True"
class DeleteExistingPeering(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to remove current existing peerings."""
FALSE = "False"
TRUE = "True"
class DeploymentStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Deployment Status."""
NOT_STARTED = "NotStarted"
DEPLOYING = "Deploying"
DEPLOYED = "Deployed"
FAILED = "Failed"
class EffectiveAdminRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class EffectiveUserRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class GroupConnectivity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Group connectivity type."""
NONE = "None"
DIRECTLY_CONNECTED = "DirectlyConnected"
class IsGlobal(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if global mesh is supported."""
FALSE = "False"
TRUE = "True"
class MembershipType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Membership Type."""
STATIC = "Static"
DYNAMIC = "Dynamic"
class NspLinkStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The NSP link state."""
APPROVED = "Approved"
PENDING = "Pending"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
class NspProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The current provisioning state."""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
ACCEPTED = "Accepted"
FAILED = "Failed"
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The current provisioning state."""
SUCCEEDED = "Succeeded"
UPDATING = "Updating"
DELETING = "Deleting"
FAILED = "Failed"
class SecurityConfigurationRuleAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether network traffic is allowed or denied."""
ALLOW = "Allow"
DENY = "Deny"
ALWAYS_ALLOW = "AlwaysAllow"
class SecurityConfigurationRuleDirection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The direction of the rule. The direction specifies if the rule will be evaluated on incoming or
outgoing traffic.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class SecurityConfigurationRuleProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Network protocol this rule applies to."""
TCP = "Tcp"
UDP = "Udp"
ICMP = "Icmp"
ESP = "Esp"
ANY = "Any"
AH = "Ah"
class SecurityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Security Type."""
ADMIN_POLICY = "AdminPolicy"
USER_POLICY = "UserPolicy"
class UseHubGateway(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to use hub gateway."""
FALSE = "False"
TRUE = "True"
class UserRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
| [
"[email protected]"
] | |
2162180fac3c2f22e1b133a7bd03f473cf420898 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pDQ8sTXDxesqiTRuY_10.py | 41e821e37f31ef7c60499d78de1c6dcbdb72db34 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py |
import re
pattern = "(?<=tall height)"
| [
"[email protected]"
] | |
a0a6f63d91b35f4e1b0adeff1d440b64e0536984 | 3cdd7019f3acbf7b7a7e879444454703fcc73d62 | /solutions/319.bulb-switcher.py | 49544fa00a51fc10ac7fe2b65b52d85034bc6e67 | [] | no_license | quixoteji/Leetcode | 1dc2e52e53a7b58d9bae15ce2d5c4142cbd365af | 00bf9a8164008aa17507b1c87ce72a3374bcb7b9 | refs/heads/master | 2021-07-15T07:59:21.294297 | 2020-05-13T03:08:47 | 2020-05-13T03:08:47 | 138,812,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #
# @lc app=leetcode id=319 lang=python3
#
# [319] Bulb Switcher
#
# @lc code=start
class Solution:
def bulbSwitch(self, n: int) -> int:
return self.sol1(n)
def sol1(self, n) :
# 0 : off 1 : on
ons = 1
while ons * ons <= n : ons += 1
return ons-1
# @lc code=end
| [
"[email protected]"
] | |
9165e19f93dedecb81d6600c76cdff8a1644df31 | 262311e60529868e38c2c57ee3db573f8e11c458 | /qa-automated/pages/app/guide_page.py | 4c174083b2c451704f06adb30ab6f1ce1a034b09 | [] | no_license | huileizhan227/untitled | 1c5604736d9ffcce6f7cb7e308cdc0ebd07e116a | 07df74c89291b1664a28e3c8dcba51a917f1835f | refs/heads/master | 2023-01-27T11:51:37.609210 | 2020-04-16T11:49:59 | 2020-04-16T11:49:59 | 150,606,504 | 1 | 0 | null | 2023-01-09T12:00:12 | 2018-09-27T15:12:18 | HTML | UTF-8 | Python | false | false | 380 | py | from poium import Page
from poium import PageElement
from poium import PageElements
class GuidPage(Page):
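    # poium page-object locators for the guide screen: a skip button, the
    # selectable topic names, and the confirm button.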
skip_btn = PageElement(id_='com.transsnet.news.more:id/skip')
topic_btn_list = PageElements(xpath='//*[@id="com.transsnet.news.more:id/recycler"]//*[@id="com.transsnet.news.more:id/name"]')
confirm_btn = PageElement(id_='com.transsnet.news.more:id/select_btn')
| [
"[email protected]"
] | |
a17a0329e2df352957ffa0acef086b440581e622 | c0717724c7dc3937252bb4a7bd7c796088db4c5d | /solutions/judgingmoose.py | 58fb1ab1e281b3934eb20ba684504c74ec01dce3 | [] | no_license | matthew-cheney/kattis-solutions | 58cd03394ad95e9ca7ffa3de66b69d90647b31ff | d9397ca4715a3ad576046a62bdd6c0fb9542d838 | refs/heads/main | 2023-01-24T12:49:18.871137 | 2020-12-10T04:10:48 | 2020-12-10T04:10:48 | 318,857,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | l, r = [int(x) for x in input().split(' ')]
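# Kattis "Judging Moose": no points on either antler means it is not a moose;
# unequal sides give an odd-pointed moose, equal sides an even one, and the
# reported size is twice the larger side.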
if l == 0 and r == 0:
print('Not a moose')
elif l != r:
print(f'Odd {2 * max(l, r)}')
else:
print(f'Even {2 * l}') | [
"[email protected]"
] | |
bfd7447e28cd315f542b2e4c3883455d18c758b4 | 55dc6e337e634acb852c570274a1d0358b7300a5 | /tests/extension/resolver_/single_module/test_resolver_single_module.py | 91d5af2b94b4d6d03772bf6913b03b34a1fe7c9a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fifoteam/veriloggen | 97ad45671f053c85f495b08a030f735fd9822146 | 23cb7251c0f126d40d249982cad33ef37902afef | refs/heads/master | 2020-05-27T00:28:37.575411 | 2017-02-20T01:47:00 | 2017-02-20T01:47:00 | 82,518,602 | 2 | 0 | null | 2017-02-20T05:02:37 | 2017-02-20T05:02:37 | null | UTF-8 | Python | false | false | 1,162 | py | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import resolver_single_module
expected_verilog = """
module blinkled #
(
parameter WIDTH = 8,
parameter INC = 1
)
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [18-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 0;
end else begin
if(count == 1023) begin
LED <= LED + 1;
end
end
end
endmodule
"""
def test():
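    # Generate Verilog from the module under test and compare it with the
    # expected source after round-tripping the latter through pyverilog's
    # parser and code generator to normalize formatting.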
veriloggen.reset()
test_module = resolver_single_module.mkLed()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
| [
"[email protected]"
] | |
b78531b1f1e38104f41d807a2b3eaea73882c0fe | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/e658c13140c18d36a72cabf611a7dcaca154d8deurls.py | e658c13140c18d36a72cabf611a7dcaca154d8de | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 2,069 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'acidentes_em_rodovias.controller.home', name='home'),
# url(r'^acidentes_em_rodovias/', include('acidentes_em_rodovias.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^acidentes_rodovias/$', 'app.controller.index_controller.index'),
url(r'^acidentes_rodovias/regiao$', 'app.controller.consultabasica_regiao_controller.consulta_por_regiao'),
url(r'^acidentes_rodovias/periodo$', 'app.controller.consultabasica_periodo_controller.consulta_por_periodo'),
url(r'^acidentes_rodovias/municipios-regiao$', 'app.controller.consultabasica_regiao_controller.consulta_municipios_na_regiao'),
url(r'^acidentes_rodovias/consulta/municipio$', 'app.controller.consultabasica_regiao_controller.consulta_ocorrencias_por_municipio'),
url(r'^acidentes_rodovias/consulta/periodo$', 'app.controller.consultabasica_periodo_controller.consulta_ocorrencias_por_periodo'),
url(r'^acidentes_rodovias/estatisticas/tipos-acidentes$', 'app.controller.estatisticas_tipos_controller.tipos_acidentes'),
url(r'^acidentes_rodovias/estatisticas/causas-acidentes$', 'app.controller.estatisticas_causas_controller.causas_acidentes'),
url(r'^acidentes_rodovias/estatisticas/ocorrencias-envolvidos$', 'app.controller.estatisticas_envolvidos_controller.ocorrencias_e_envolvidos'),
url(r'^acidentes_rodovias/estatisticas/acidentes-sexo$', 'app.controller.estatisticas_envolvidos_controller.acidentes_sexo'),
url(r'^acidentes_rodovias/estatisticas/br$', 'app.controller.estatisticas_br_controller.acidentes_br'),
url(r'^acidentes_rodovias/estatisticas/uf$', 'app.controller.estatisticas_uf_controller.acidentes_uf'),
)
| [
"[email protected]"
] | |
c61fa4711a00062c8bb8b50504176552f291335f | 98420fdd66b8dce46ef88cd34fcace36777fa232 | /py3/torch_tutor1/main1.py | 1b70aba34044d998d433534cedb6c8f91228cf1b | [] | no_license | Daiver/jff | f972fe7464f78ba6008a036b697ea3f04b7010a4 | 33d6a781af8d7f6ae60c25e10051977af2fef1b9 | refs/heads/master | 2023-04-07T06:33:41.487938 | 2022-05-03T10:07:32 | 2022-05-03T10:07:32 | 12,180,634 | 1 | 1 | null | 2023-04-03T19:25:00 | 2013-08-17T15:03:14 | C++ | UTF-8 | Python | false | false | 107 | py | import torch
import numpy as np
if __name__ == '__main__':
x = torch.ones((2, 3)).cuda()
print(x)
| [
"[email protected]"
] | |
cc4e35adf99011ab4eaf8c5561d665063815d5ad | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/piratesgui/GameOptions.py | 5fd28ae10abd4d11e337bd0b1d91ebdac6a2391c | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 112,675 | py | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.GameOptions
import copy, string, os, sys, datetime
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.piratesgui.GuiButton import GuiButton
from pirates.piratesgui.DialogButton import DialogButton
from pirates.piratesbase import PLocalizer
from pirates.piratesbase import Freebooter
from otp.otpgui import OTPDialog
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPRender
from pirates.piratesgui import PDialog
from pirates.seapatch.Water import Water
from direct.motiontrail.MotionTrail import MotionTrail
from direct.directnotify import DirectNotifyGlobal
from pirates.piratesgui import GameOptionsMatrix
from pirates.piratesgui.GameOptionsGui import *
from pirates.uberdog.UberDogGlobals import InventoryType
try:
import embedded
except:
pass
class OptionSpace:
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('OptionSpace')
def __init__(self):
pass
def read_integer(self, input_file):
s = string.strip(input_file.readline())
if self.debug:
print s
return string.atoi(s)
def read_float(self, input_file):
s = string.strip(input_file.readline())
if self.debug:
print s
return string.atof(s)
def read_string(self, input_file):
return string.strip(input_file.readline())
def write_integer(self, output_file, value):
if output_file:
output_file.write(value.__repr__())
output_file.write('\n')
def write_float(self, output_file, value):
if output_file:
output_file.write(value.__repr__())
output_file.write('\n')
def write_text(self, output_file, string):
if output_file:
output_file.write(string)
def write_string(self, output_file, string):
if output_file:
output_file.write(string)
output_file.write('\n')
class DisplayOptions:
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('DisplayOptions')
def __init__(self):
self.restore_failed = False
self.restrictToEmbedded(1, False)
def restrictToEmbedded(self, restrict, change_display=True):
if base.config.GetBool('disable-restrict-to-embedded', False):
restrict = 0
if base.appRunner is None or base.appRunner.windowProperties is None:
restrict = 0
change_display = False
self.restrict_to_embedded = choice(restrict, 1, 0)
self.notify.debug('restrict_to_embedded: %s' % self.restrict_to_embedded)
if change_display:
self.set(base.options, base.pipe, base.options.getWidth(), base.options.getHeight())
return
def set(self, options, pipe, width, height):
state = False
self.notify.info('SET')
fullscreen = options.fullscreen_runtime
embedded = options.embedded_runtime
if self.restrict_to_embedded:
fullscreen = 0
embedded = 1
if embedded:
width = base.appRunner.windowProperties.getXSize()
height = base.appRunner.windowProperties.getYSize()
self.current_pipe = base.pipe
self.current_properties = WindowProperties(base.win.getProperties())
properties = self.current_properties
self.notify.debug('DISPLAY PREVIOUS:')
self.notify.debug(' EMBEDDED: %s' % bool(properties.getParentWindow()))
self.notify.debug(' FULLSCREEN: %s' % bool(properties.getFullscreen()))
self.notify.debug(' X SIZE: %s' % properties.getXSize())
self.notify.debug(' Y SIZE: %s' % properties.getYSize())
self.notify.debug('DISPLAY REQUESTED:')
self.notify.debug(' EMBEDDED: %s' % bool(embedded))
self.notify.debug(' FULLSCREEN: %s' % bool(fullscreen))
self.notify.debug(' X SIZE: %s' % width)
self.notify.debug(' Y SIZE: %s' % height)
        if self.current_pipe == pipe and bool(self.current_properties.getParentWindow()) == bool(embedded) and self.current_properties.getFullscreen() == fullscreen and self.current_properties.getXSize() == width and self.current_properties.getYSize() == height:
            self.notify.info('DISPLAY NO CHANGE REQUIRED')
            state = True
else:
properties = WindowProperties()
properties.setSize(width, height)
properties.setFullscreen(fullscreen)
properties.setParentWindow(0)
if embedded:
properties = base.appRunner.windowProperties
original_sort = base.win.getSort()
if self.resetWindowProperties(pipe, properties):
self.notify.debug('DISPLAY CHANGE SET')
properties = base.win.getProperties()
self.notify.debug('DISPLAY ACHIEVED:')
self.notify.debug(' EMBEDDED: %s' % bool(properties.getParentWindow()))
self.notify.debug(' FULLSCREEN: %s' % bool(properties.getFullscreen()))
self.notify.debug(' X SIZE: %s' % properties.getXSize())
self.notify.debug(' Y SIZE: %s' % properties.getYSize())
                if bool(properties.getParentWindow()) == bool(embedded) and properties.getFullscreen() == fullscreen and properties.getXSize() == width and properties.getYSize() == height:
                    self.notify.info('DISPLAY CHANGE VERIFIED')
                    state = True
else:
self.notify.warning('DISPLAY CHANGE FAILED, RESTORING PREVIOUS DISPLAY')
self.restoreWindowProperties(options)
else:
self.notify.warning('DISPLAY CHANGE FAILED')
self.notify.warning('DISPLAY SET - BEFORE RESTORE')
self.restoreWindowProperties(options)
self.notify.warning('DISPLAY SET - AFTER RESTORE')
base.win.setSort(original_sort)
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
return state
def resetWindowProperties(self, pipe, properties):
if base.win:
currentProperties = WindowProperties(base.win.getProperties())
gsg = base.win.getGsg()
else:
currentProperties = WindowProperties.getDefault()
gsg = None
newProperties = WindowProperties(currentProperties)
newProperties.addProperties(properties)
newProperties.clearOrigin()
if base.pipe != pipe:
gsg = None
if gsg == None or currentProperties.getFullscreen() != newProperties.getFullscreen() or currentProperties.getParentWindow() != newProperties.getParentWindow():
self.notify.debug('requested properties: %s' % properties)
self.notify.debug('window properties: %s' % newProperties)
self.notify.debug('gsg: %s' % gsg)
base.pipe = pipe
if not base.openMainWindow(props=newProperties, gsg=gsg, keepCamera=True):
self.notify.warning('OPEN MAIN WINDOW FAILED')
return 0
self.notify.info('OPEN MAIN WINDOW PASSED')
base.graphicsEngine.openWindows()
if base.win.isClosed():
self.notify.warning('Window did not open, removing.')
base.closeWindow(base.win)
return 0
else:
self.notify.debug('Adjusting properties')
base.win.requestProperties(properties)
base.graphicsEngine.renderFrame()
return 1
def restoreWindowProperties(self, options):
if self.resetWindowProperties(self.current_pipe, self.current_properties):
self.restore_failed = False
else:
self.notify.warning("Couldn't restore original display settings!")
            if base.appRunner and base.appRunner.windowProperties:
options.fullscreen = 0
options.embedded = 1
tryProps = base.appRunner.windowProperties
if self.resetWindowProperties(self.current_pipe, tryProps):
self.current_properties = copy.copy(tryProps)
self.restore_failed = False
return
if self.current_properties.getFullscreen():
options.fullscreen = 0
options.embedded = 0
tryProps = self.current_properties
tryProps.setFullscreen(0)
if self.resetWindowProperties(self.current_pipe, tryProps):
self.current_properties = copy.copy(tryProps)
self.restore_failed = False
return
self.notify.error('Failed opening regular window!')
base.panda3dRenderError()
self.restore_failed = True
class Options(OptionSpace):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('Options')
debug = False
options_version = 16
DEFAULT_API_FILE_PATH = 'game_api.txt'
DEFAULT_FILE_PATH = 'game_options.txt'
WORKING_FILE_PATH = 'last_working_options.txt'
POSSIBLE_WORKING_FILE_PATH = 'p_working_options.txt'
DEFAULT_STATE = 'default'
CONFIG_STATE = 'config'
NEW_STATE = 'new'
ATTEMPT_STATE = 'attempt'
WORKING_STATE = 'working'
ATTEMPT_WORKING_STATE = 'attempt_working'
option_low = 0
option_medium = 1
option_high = 2
option_custom = 3
texture_low = 256
texture_medium = 512
texture_high = 1024
texture_maximum = -1
default_max_texture_dimension = -1
texture_scale_low = 0.25
texture_scale_medium = 0.5
texture_scale_high = 1.0
texture_scale_maximum = 1.0
gamma_save_offset = 0.25
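    # gamma is persisted offset by gamma_save_offset: save() subtracts it and
    # load() adds it back.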
SpecialEffectsHigh = 2
SpecialEffectsMedium = 1
SpecialEffectsLow = 0
use_stereo = 0
RadarAxisMap = 0
RadarAxisCamera = 1
desiredApi = 'default'
def __init__(self):
self.default()
self.texture_scale_mode = True
self.recommendOptionsBasedOnData = base.config.GetBool('use-statistical-gameoptions-recommendation', 0)
self.invasionOn = False
self.display = DisplayOptions()
def save(self, file_path, state_string=None):
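        # The options file is written as alternating "name" and "value" lines;
        # load() later re-pairs them into a token dictionary.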
state = False
try:
output_file = open(file_path, 'w')
if output_file:
self.write_string(output_file, 'version ')
self.write_integer(output_file, self.version)
self.write_string(output_file, 'state ')
if state_string == None:
self.write_string(output_file, self.state)
else:
self.write_string(output_file, state_string)
self.write_string(output_file, 'api ')
self.write_string(output_file, self.api)
self.write_string(output_file, 'window_width ')
self.write_integer(output_file, self.window_width)
self.write_string(output_file, 'window_height ')
self.write_integer(output_file, self.window_height)
self.write_string(output_file, 'fullscreen_width ')
self.write_integer(output_file, self.fullscreen_width)
self.write_string(output_file, 'fullscreen_height ')
self.write_integer(output_file, self.fullscreen_height)
self.write_string(output_file, 'resolution ')
self.write_integer(output_file, self.resolution)
self.write_string(output_file, 'embedded ')
self.write_integer(output_file, self.embedded)
self.write_string(output_file, 'fullscreen ')
self.write_integer(output_file, self.fullscreen)
self.write_string(output_file, 'widescreen ')
self.write_integer(output_file, self.widescreen)
self.write_string(output_file, 'widescreen_resolution ')
self.write_integer(output_file, self.widescreen_resolution)
self.write_string(output_file, 'widescreen_fullscreen ')
self.write_integer(output_file, self.widescreen_fullscreen)
self.write_string(output_file, 'reflection ')
self.write_integer(output_file, self.reflection)
self.write_string(output_file, 'shader ')
self.write_integer(output_file, self.shader)
self.write_string(output_file, 'smooth_edges ')
self.write_integer(output_file, self.smoothEdges)
self.write_string(output_file, 'shadow ')
self.write_integer(output_file, self.shadow)
self.write_string(output_file, 'texture ')
self.write_integer(output_file, self.texture)
self.write_string(output_file, 'texture_compression ')
self.write_integer(output_file, self.textureCompression)
self.write_string(output_file, 'sound ')
self.write_integer(output_file, self.sound)
self.write_string(output_file, 'sound_volume ')
self.write_float(output_file, self.sound_volume)
self.write_string(output_file, 'music ')
self.write_integer(output_file, self.music)
self.write_string(output_file, 'music_volume ')
self.write_float(output_file, self.music_volume)
self.write_string(output_file, 'first_mate_voice ')
self.write_float(output_file, self.first_mate_voice)
self.write_string(output_file, 'gui_scale ')
self.write_float(output_file, self.gui_scale)
self.write_string(output_file, 'chatbox_scale ')
self.write_float(output_file, self.chatbox_scale)
self.write_string(output_file, 'special_effects ')
self.write_integer(output_file, self.special_effects)
self.write_string(output_file, 'texture_scale ')
if self.texture_scale <= 0.0:
self.texture_scale = 1.0
self.write_float(output_file, self.texture_scale)
self.write_string(output_file, 'character_detail_level ')
self.write_integer(output_file, self.character_detail_level)
self.write_string(output_file, 'terrain_detail_level ')
self.write_integer(output_file, self.terrain_detail_level)
self.write_string(output_file, 'memory ')
self.write_integer(output_file, self.memory)
self.write_string(output_file, 'mouse_look ')
self.write_integer(output_file, self.mouse_look)
self.write_string(output_file, 'ship_look ')
self.write_integer(output_file, self.ship_look)
self.write_string(output_file, 'gamma ')
self.write_float(output_file, self.gamma - self.gamma_save_offset)
self.write_string(output_file, 'gamma_enable ')
self.write_integer(output_file, self.gamma_enable)
self.write_string(output_file, 'cpu_frequency_warning ')
if self.cpu_frequency_warning:
cpu_frequency_warning = 0
else:
cpu_frequency_warning = 1
self.write_integer(output_file, cpu_frequency_warning)
self.write_string(output_file, 'hdr ')
self.write_integer(output_file, self.hdr)
self.write_string(output_file, 'hdr_factor ')
self.write_integer(output_file, self.hdr_factor)
self.write_string(output_file, 'ocean_visibility ')
self.write_integer(output_file, self.ocean_visibility)
self.write_string(output_file, 'ocean_visibility ')
self.write_integer(output_file, self.ocean_visibility)
self.write_string(output_file, 'land_map_radar_axis ')
self.write_integer(output_file, self.land_map_radar_axis)
self.write_string(output_file, 'ocean_map_radar_axis ')
self.write_integer(output_file, self.ocean_map_radar_axis)
self.write_string(output_file, 'simple_display_option ')
self.write_integer(output_file, self.simple_display_option)
self.write_string(output_file, 'use_stereo ')
self.write_integer(output_file, self.use_stereo)
output_file.close()
state = True
except:
pass
return state
def saveWorking(self):
options = Options()
options_loaded = options.load(Options.POSSIBLE_WORKING_FILE_PATH)
if options_loaded:
options.save(Options.WORKING_FILE_PATH, Options.WORKING_STATE)
options = Options()
options_loaded = options.load(Options.DEFAULT_FILE_PATH)
if options_loaded:
options.save(Options.DEFAULT_FILE_PATH, Options.WORKING_STATE)
def savePossibleWorking(self, options):
options.save(Options.POSSIBLE_WORKING_FILE_PATH, Options.WORKING_STATE)
def validate(self, dataType, dataName, default, acceptableValues=[]):
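        # Look up dataName among the tokens parsed from the options file, cast
        # the value with dataType, and fall back to default on any error or
        # when the value is not in acceptableValues.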
try:
data = self.tokenDict.get(dataName, default)
if data == default:
return default
if isinstance(dataType, type):
castValue = dataType(data)
if acceptableValues:
if castValue in acceptableValues:
return castValue
else:
return default
return castValue
else:
return default
except:
return default
def load(self, file_path):
state = False
self.desiredApi = launcher.getValue('api', self.desiredApi)
if self.desiredApi == 'default':
try:
input_file = open(Options.DEFAULT_API_FILE_PATH, 'r')
self.desiredApi = input_file.readline().strip()
except:
pass
try:
output_file = open(Options.DEFAULT_API_FILE_PATH, 'w')
output_file.writelines(self.desiredApi + '\n')
output_file.close()
except:
pass
else:
self.api = self.desiredApi
try:
input_file = open(file_path, 'r')
file_data = [ x.strip() for x in input_file.read().split('\n') ]
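            # The file alternates "name" and "value" lines, so zipping each
            # line with its successor pairs every token name with its value.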
self.tokenDict = dict([ x for x in zip(file_data[:-1], file_data[1:]) if x[0][0].isalpha() or x[0].isalnum() ])
self.version = self.validate(int, 'version', 0)
self.state = self.validate(str, 'state', self.DEFAULT_STATE, [self.DEFAULT_STATE, self.CONFIG_STATE, self.NEW_STATE, self.ATTEMPT_STATE, self.WORKING_STATE, self.ATTEMPT_WORKING_STATE])
self.window_width = self.validate(int, 'window_width', 0)
self.window_height = self.validate(int, 'window_height', 0)
self.fullscreen_width = self.validate(int, 'fullscreen_width', 0)
self.fullscreen_height = self.validate(int, 'fullscreen_height', 0)
self.resolution = self.validate(int, 'resolution', 0, [0, 1])
self.embedded = self.validate(int, 'embedded', 0, [0, 1])
self.fullscreen = self.validate(int, 'fullscreen', 0, [0, 1])
self.widescreen = self.validate(int, 'widescreen', 0)
self.widescreen_resolution = self.validate(int, 'widescreen_resolution', 0)
self.widescreen_fullscreen = self.validate(int, 'widescreen_fullscreen', 0)
self.reflection = self.validate(int, 'reflection', 0)
self.shader = self.validate(int, 'shader', 0)
self.smoothEdges = self.validate(int, 'smooth_edges', 0)
self.shadow = self.validate(int, 'shadow', 0)
self.texture = self.validate(int, 'texture', -1, [-1, 256, 512, 1024])
self.textureCompression = self.validate(int, 'texture_compression', 1)
self.sound = self.validate(int, 'sound', 1)
self.sound_volume = self.validate(float, 'sound_volume', 1.0)
self.music = self.validate(int, 'music', 1)
self.music_volume = self.validate(float, 'music_volume', 1.0)
self.first_mate_voice = self.validate(int, 'first_mate_voice', 1)
self.gui_scale = self.validate(float, 'gui_scale', 0.5)
self.chatbox_scale = self.validate(float, 'chatbox_scale', 0.0)
self.special_effects = self.validate(int, 'special_effects', 2)
self.texture_scale = self.validate(float, 'texture_scale', 1.0)
if self.texture_scale <= 0.0:
self.texture_scale = 1.0
self.character_detail_level = self.validate(int, 'character_detail_level', 2)
self.terrain_detail_level = self.validate(int, 'terrain_detail_level', 2)
self.memory = self.validate(int, 'memory', 0)
self.mouse_look = self.validate(int, 'mouse_look', 0, [0, 1])
self.ship_look = self.validate(int, 'ship_look', 1, [0, 1])
base.setShipLookAhead(self.ship_look)
self.gamma = self.validate(float, 'gamma', 0.0)
self.gamma += self.gamma_save_offset
self.gamma_enable = self.validate(int, 'gamma_enable', 0, [0, 1])
token = self.read_string(input_file)
cpu_frequency_warning = self.validate(int, 'cpu_frequency_warning', 0)
if cpu_frequency_warning:
self.cpu_frequency_warning = 0
else:
self.cpu_frequency_warning = 1
self.hdr = self.validate(int, 'hdr', 0)
self.hdr_factor = self.validate(float, 'hdr_factor', 1.0)
if base.config.GetInt('want-game-options-ship-visibility', 0):
self.ocean_visibility = self.validate(int, 'ocean_visibility', 1, [0, 1, 2])
else:
self.ocean_visibility = 0
self.land_map_radar_axis = self.validate(int, 'land_map_radar_axis', self.RadarAxisMap, [self.RadarAxisMap, self.RadarAxisCamera])
self.ocean_map_radar_axis = self.validate(int, 'ocean_map_radar_axis', self.RadarAxisCamera, [self.RadarAxisMap, self.RadarAxisCamera])
self.simple_display_option = self.validate(int, 'simple_display_option', 3, [0, 1, 2, 3])
self.use_stereo = self.validate(int, 'use_stereo', 0)
state = True
except:
pass
self.runtime()
return state
def config_resolution(self):
self.resolution = base.width_to_resolution_id(base.config.GetInt('win-size', 800))
horizontal_resolution = win_size.getIntWord(0)
vertical_resolution = win_size.getIntWord(1)
self.widescreen = 0
if horizontal_resolution == 1280 and vertical_resolution == 720:
self.widescreen = 1
self.widescreen_resolution = 0
if horizontal_resolution == 1920 and vertical_resolution == 1080:
self.widescreen = 1
self.widescreen_resolution = 1
self.window_width = horizontal_resolution
self.window_height = vertical_resolution
self.fullscreen_width = horizontal_resolution
self.fullscreen_height = vertical_resolution
def config_to_options(self):
self.default()
win_size = ConfigVariableInt('win-size')
self.resolution = GameOptions.width_to_resolution_id(base.config.GetInt('win-size', 800))
horizontal_resolution = win_size.getIntWord(0)
vertical_resolution = win_size.getIntWord(1)
self.widescreen = 0
if horizontal_resolution == 1280:
if vertical_resolution == 720:
self.widescreen = 1
self.widescreen_resolution = 0
if horizontal_resolution == 1920:
if vertical_resolution == 1080:
self.widescreen = 1
self.widescreen_resolution = 1
self.fullscreen = base.config.GetBool('fullscreen', 1)
        if base.config.GetBool('want-water-reflection', 1):
            if base.config.GetBool('want-water-reflection-show-through-only', 1):
                self.reflection = 1
            else:
                self.reflection = 2
        else:
            self.reflection = 0
if base.config.GetBool('want-water-reflect-all', 0):
self.reflection = 3
self.window_width = horizontal_resolution
self.window_height = vertical_resolution
self.fullscreen_width = horizontal_resolution
self.fullscreen_height = vertical_resolution
self.shader = base.config.GetBool('want-shaders', 1)
self.shadow = base.config.GetBool('want-avatar-shadows', 0)
self.texture = base.config.GetInt('max-texture-dimension', Options.default_max_texture_dimension)
self.textureCompression = base.config.GetInt('compressed-textures', 0)
self.texture_scale = base.config.GetFloat('texture-scale', 1.0)
self.sound = base.config.GetBool('audio-sfx-active', 1)
self.sound_volume = base.config.GetFloat('audio-sfx-volume', 1.0)
self.music = base.config.GetBool('audio-music-active', 1)
self.music_volume = base.config.GetFloat('audio-music-volume', 1.0)
self.first_mate_voice = base.config.GetBool('first-mate-voice', 1)
self.ocean_visibility = base.config.GetInt('ocean-visibility', 0)
self.land_map_radar_axis = base.config.GetInt('land-map-radar-axis', self.RadarAxisMap)
self.ocean_map_radar_axis = base.config.GetInt('ocean-map-radar-axis', self.RadarAxisCamera)
self.runtime()
def options_to_config(self):
config_variable = ConfigVariableBool('want-widescreen', 0)
config_variable.setValue(self.widescreen)
config_variable = ConfigVariableBool('want-water-reflection', 1)
if self.reflection == 0:
config_variable.setValue(0)
else:
config_variable.setValue(1)
config_variable = ConfigVariableBool('want-water-reflection-show-through-only', 1)
if self.reflection == 1:
config_variable.setValue(1)
if self.reflection == 2:
config_variable.setValue(0)
if self.reflection == 3:
config_variable.setValue(0)
config_variable = ConfigVariableBool('want-water-reflect-all', 0)
if self.reflection == 3:
config_variable.setValue(1)
else:
config_variable.setValue(0)
config_variable = ConfigVariableBool('want-shaders', 0)
config_variable.setValue(self.shader_runtime)
config_variable = ConfigVariableBool('want-avatar-shadows', 0)
config_variable.setValue(self.shadow)
config_variable = ConfigVariableBool('compressed-textures')
config_variable.setValue(self.textureCompressionRuntime)
def getWidth(self):
if self.embedded_runtime:
return base.appRunner.windowProperties.getXSize()
else:
if self.fullscreen_runtime:
return self.fullscreen_width
else:
return self.window_width
def getHeight(self):
if self.embedded_runtime:
return base.appRunner.windowProperties.getYSize()
else:
if self.fullscreen_runtime:
return self.fullscreen_height
else:
return self.window_height
def getEmbedded(self):
return self.embedded_runtime
def getFullscreen(self):
return not self.embedded_runtime and self.fullscreen_runtime
def getWindowed(self):
return not self.embedded_runtime and not self.fullscreen_runtime
def optionsToPrcData(self):
string = ''
if not base.appRunner:
string = string + 'win-size ' + self.getWidth().__repr__() + ' ' + self.getHeight().__repr__() + '\n'
string = string + 'fullscreen ' + self.getFullscreen().__repr__() + '\n'
if self.textureCompressionRuntime:
string = string + 'compressed-textures 1\n'
else:
string = string + 'compressed-textures 0\n'
if self.debug:
print string
return string
def setRuntimeOptions(self):
base.enableSoundEffects(self.sound)
base.enableMusic(self.music)
base.enableFirstMate(self.first_mate_voice)
if base.sfxManagerList:
index = 0
length = len(base.sfxManagerList)
while index < length:
sfx_manager = base.sfxManagerList[index]
sfx_manager.setVolume(self.sound_volume)
index += 1
if base.musicManager:
base.musicManager.setVolume(self.music_volume)
self.setRuntimeSpecialEffects()
self.setRuntimeGridDetailLevel(self.terrain_detail_level)
self.setRuntimeAvatarDetailLevel(self.character_detail_level)
if self.smoothEdges:
render.setAntialias(AntialiasAttrib.MMultisample)
if hasattr(base, 'setLowMemory'):
base.setLowMemory(self.memory)
if base.win and base.win.getGsg():
if self.gamma_enable:
base.win.getGsg().setGamma(self.optionsGammaToGamma(self.gamma))
self.setTextureScale()
self.setRuntimeStereo()
self.setLandMapRadarAxis()
self.setOceanMapRadarAxis()
def setTextureScale(self):
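        # Clamp texture-scale to (0, 1] and choose a graphics memory limit of
        # 32, 64 or 128 MB depending on how aggressively textures are scaled.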
config_variable = ConfigVariableDouble('texture-scale', 1.0)
value = self.texture_scale
if value <= 0.0:
value = 1.0
if value >= 1.0:
value = 1.0
config_variable.setValue(value)
if value < 0.3:
limit = 32
else:
if value < 0.75:
limit = 64
else:
limit = 128
limit *= 1048576
if base.win.getGsg():
base.win.getGsg().getPreparedObjects().setGraphicsMemoryLimit(limit)
ConfigVariableInt('graphics-memory-limit').setValue(limit)
def getGUIScale(self):
return self.gui_scale * 0.6 + 0.7
def default(self):
self.version = self.options_version
self.state = 'default'
self.api = self.desiredApi
self.window_width = 800
self.window_height = 600
self.fullscreen_width = 800
self.fullscreen_height = 600
self.resolution = 1
self.embedded = 0
self.fullscreen = 1
self.widescreen = 0
self.widescreen_resolution = 0
self.widescreen_fullscreen = 0
self.reflection = 1
self.shader = 0
self.smoothEdges = 0
self.shadow = 0
self.texture = Options.default_max_texture_dimension
self.textureCompression = 0
self.sound = 1
self.sound_volume = 1.0
self.music = 1
self.music_volume = 1.0
self.first_mate_voice = 1
self.gui_scale = 0.5
self.chatbox_scale = 0.0
self.special_effects = self.SpecialEffectsHigh
self.texture_scale = 0.0
self.character_detail_level = Options.option_high
self.terrain_detail_level = Options.option_high
self.memory = 0
self.mouse_look = 0
self.ship_look = 1
self.gamma = self.gamma_save_offset
self.gamma_enable = 0
self.cpu_frequency_warning = 1
self.reserved10 = 0
self.ocean_visibility = 0
self.hdr = 0
self.hdr_factor = 1.0
self.simple_display_option = Options.option_custom
self.use_stereo = 0
self.land_map_radar_axis = self.RadarAxisMap
self.ocean_map_radar_axis = self.RadarAxisCamera
self.runtime()
def runtime(self):
self.embedded_runtime = self.embedded
self.fullscreen_runtime = self.fullscreen
self.widescreen_runtime = self.widescreen
self.shader_runtime = self.shader
self.texture_runtime = self.texture
self.textureCompressionRuntime = self.textureCompression
def totalDisplayResolutions(self, pipe):
total = 0
if pipe:
di = pipe.getDisplayInformation()
total = di.getTotalDisplayModes()
return total
def findResolution(self, width, height, bits_per_pixel, pipe):
found = False
if pipe:
di = pipe.getDisplayInformation()
index = 0
total_display_modes = di.getTotalDisplayModes()
while index < total_display_modes:
if di.getDisplayModeBitsPerPixel(index) == bits_per_pixel:
if width == di.getDisplayModeWidth(index) and height == di.getDisplayModeHeight(index):
found = True
break
index += 1
return found
def verifyOptions(self, pipe, overwrite=False):
state = False
bits_per_pixel = 32
if pipe:
state = True
di = pipe.getDisplayInformation()
if self.window_width <= di.getMaximumWindowWidth():
state = self.window_height <= di.getMaximumWindowHeight() and state and True
else:
state = False
if di.getMaximumWindowWidth() > 0:
if di.getMaximumWindowHeight() > 0:
self.window_width = self.findResolution(di.getMaximumWindowWidth(), di.getMaximumWindowHeight(), bits_per_pixel, pipe) and overwrite and di.getMaximumWindowWidth()
self.window_height = di.getMaximumWindowHeight()
else:
index = 0
total_display_modes = di.getTotalDisplayModes()
while index < total_display_modes:
if di.getDisplayModeBitsPerPixel(index) == bits_per_pixel:
if di.getDisplayModeWidth(index) >= GameOptions.MinimumHorizontalResolution and di.getDisplayModeHeight(index) >= GameOptions.MinimumVerticalResolution:
if overwrite:
self.window_width = di.getDisplayModeWidth(index)
self.window_height = di.getDisplayModeHeight(index)
break
index += 1
if self.findResolution(self.fullscreen_width, self.fullscreen_height, bits_per_pixel, pipe):
state = state and True
else:
state = False
if di.getMaximumWindowWidth() > 0:
if di.getMaximumWindowHeight() > 0:
self.fullscreen_width = self.findResolution(di.getMaximumWindowWidth(), di.getMaximumWindowHeight(), bits_per_pixel, pipe) and overwrite and di.getMaximumWindowWidth()
self.fullscreen_height = di.getMaximumWindowHeight()
else:
index = 0
total_display_modes = di.getTotalDisplayModes()
while index < total_display_modes:
if di.getDisplayModeBitsPerPixel(index) == bits_per_pixel:
if di.getDisplayModeWidth(index) >= GameOptions.MinimumHorizontalResolution and di.getDisplayModeHeight(index) >= GameOptions.MinimumVerticalResolution:
if overwrite:
self.fullscreen_width = di.getDisplayModeWidth(index)
self.fullscreen_height = di.getDisplayModeHeight(index)
break
index += 1
return state
def physicalMemoryToGeomCacheSize(self, pipe):
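        # Map detected physical RAM (in 256 MB blocks) to a geom-cache-size
        # value; more memory allows a larger cache.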
size = 100
di = pipe.getDisplayInformation()
if di.getDisplayState() == DisplayInformation.DSSuccess:
block_size = 1024 * 1024 * 256
physical_memory = di.getPhysicalMemory()
if physical_memory > 0:
blocks = (physical_memory + block_size / 2) / block_size
if blocks <= 1:
size = 100
elif blocks == 2:
size = 100
elif blocks == 3:
size = 300
elif blocks == 4:
size = 500
elif blocks == 5:
size = 1000
elif blocks == 6:
size = 2000
elif blocks == 7:
size = 3000
else:
size = 3000
return size
def setPrc(self, string):
loadPrcFileData('game_options', string)
def setRuntimeGeomCacheSize(self, size):
if size > 0:
string = 'geom-cache-size ' + size.__repr__() + '\n'
self.setPrc(string)
def getTextureScaleString(self):
level = self.texture_scale
if level == Options.texture_scale_low:
string = 'low'
else:
if level == Options.texture_scale_medium:
string = 'medium'
else:
if level == Options.texture_scale_high:
string = 'high'
return string
def getGameOptionString(self, level):
if level == Options.option_low:
string = 'low'
else:
if level == Options.option_medium:
string = 'med'
else:
string = 'high'
return string
def setRuntimeGridDetailLevel(self, level):
string = 'grid-detail %s\n' % self.getGameOptionString(level)
self.setPrc(string)
try:
messenger.send('grid-detail-changed', [level])
base.positionFarCull()
except:
pass
def setRuntimeAvatarDetailLevel(self, level):
string = 'avatar-detail %s\n' % self.getGameOptionString(level)
self.setPrc(string)
def setGeomCacheSize(self, pipe):
return
if base.config.GetInt('ignore-game-options', 0) == 0:
size = self.physicalMemoryToGeomCacheSize(pipe)
if size > 0:
string = 'geom-cache-size ' + size.__repr__() + '\n'
if self.debug:
print string
self.setPrc(string)
def getPhysicalMemory(self, pipe):
physical_memory = 0
di = pipe.getDisplayInformation()
if di.getDisplayState() == DisplayInformation.DSSuccess:
physical_memory = di.getPhysicalMemory()
return physical_memory
def optionsGammaToGamma(self, gamma):
return gamma * 2.0 + 0.5
def automaticGraphicsApiSelection(self, pipe):
di = pipe.getDisplayInformation()
if di.getDisplayState() == DisplayInformation.DSSuccess:
vendor_id = di.getVendorId()
device_id = di.getDeviceId()
if vendor_id == 4153:
if pipe.getInterfaceName() == 'OpenGL':
self.api = 'pandadx9'
def recommendedOptions(self, pipe, realtime):
di = pipe.getDisplayInformation()
if di.getDisplayState() == DisplayInformation.DSSuccess:
vendor_id = di.getVendorId()
device_id = di.getDeviceId()
driver_year = di.getDriverDateYear()
driver_month = di.getDriverDateMonth()
driver_day = di.getDriverDateDay()
driver_product = di.getDriverProduct()
driver_version = di.getDriverVersion()
driver_sub_version = di.getDriverSubVersion()
driver_build = di.getDriverBuild()
days = 0.0
if driver_year > 0 and driver_month > 0 and driver_day > 0:
today = datetime.date.today()
days = self.compareDates(today.year, today.month, today.day, driver_year, driver_month, driver_day)
if driver_product or driver_version or driver_sub_version or driver_build:
product = 0
version = 0
sub_version = 0
build = 0
state = self.compareNumbers(product, version, sub_version, build, driver_product, driver_version, driver_sub_version, driver_build)
low_end_card = 0
self.shader = 0
self.output('vendor_id ', vendor_id)
self.output('device_id ', device_id)
if vendor_id == 4098:
ati_device_list = [
[
0, 'Rage 128 Pro', 20550], [0, 'RV100 Radeon 7000 / Radeon VE', 20825], [0, 'Rage 128 PRO ULTRA Video Controller', 21574], ['AMD Stream Processor', 29262], ['AMD Stream Processor Secondary', 29294], ['Fire PRO Professional Graphics ASIC', 38348], ['FireStream 9170', 38169], ['ATI FireGL T2', 16724], ['ATI FireGL T2 Secondary', 16756], ['ATI FireGL V3100', 23396], ['ATI FireGL V3100 Secondary', 23412], ['ATI FireGL V3200', 15956], ['ATI FireGL V3200 Secondary', 15988], ['ATI FireGL V3300', 29010], ['ATI FireGL V3300 Secondary', 29042], ['ATI FireGL V3350', 29011], ['ATI FireGL V3350 Secondary', 29043], ['ATI FireGL V3400', 29138], ['ATI FireGL V3400 Secondary', 29170], ['ATI FireGL V5000', 24136], ['ATI FireGL V5000 Secondary', 24168], ['ATI FireGL V5100', 21841], ['ATI FireGL V5100 Secondary', 21873], ['ATI FireGL V5200', 29146], ['ATI FireGL V5200 Secondary', 29178], ['ATI FireGL V5300', 28933], ['ATI FireGL V5300 Secondary', 28965], ['ATI FireGL V7100', 21840], ['ATI FireGL V7100 Secondary', 21872], ['ATI FireGL V7200', 23888], ['ATI FireGL V7200 ', 28932], ['ATI FireGL V7200 Secondary', 23920], ['ATI FireGL V7200 Secondary ', 28964], ['ATI FireGL V7300', 28942], ['ATI FireGL V7300 Secondary', 28974], ['ATI FireGL V7350', 28943], ['ATI FireGL V7350 Secondary', 28975], ['ATI FireGL V7600', 37903], ['ATI FireGL V7700', 38161], ['ATI FireGL V8600', 37899], ['ATI FireGL V8650', 37898], ['ATI FireGL X1', 20039], ['ATI FireGL X1 Secondary', 20071], ['ATI FireGL X2-256/X2-256t', 20043], ['ATI FireGL X2-256/X2-256t Secondary', 20075], ['ATI FireGL X3-256', 19021], ['ATI FireGL X3-256 Secondary', 19053], ['ATI FireGL Z1', 16711], ['ATI FireGL Z1 Secondary', 16743], ['ATI FireMV 2200', 23397], ['ATI FireMV 2200 Secondary', 23413], ['ATI FireMV 2250', 29083], ['ATI FireMV 2250 Secondary', 29115], ['ATI FireMV 2260', 38350], ['ATI FireMV 2260', 38351], ['ATI FireMV 2400', 12625], ['ATI FireMV 2400 Secondary', 12657], ['ATI FireMV 2450', 38349], ['ATI FireStream 2U', 29262], ['ATI FireStream 2U Secondary', 29294], ['ATI MOBILITY FIRE GL 7800', 19544], ['ATI MOBILITY FIRE GL T2/T2e', 20052], ['ATI MOBILITY FireGL V3100', 21604], ['ATI MOBILITY FireGL V3200', 12628], ['ATI MOBILITY FireGL V5000', 22090], ['ATI MOBILITY FireGL V5000 ', 22091], ['ATI MOBILITY FireGL V5100', 23881], ['ATI MOBILITY FireGL V5200', 29124], ['ATI MOBILITY FireGL V5250', 29140], ['ATI MOBILITY FireGL V7100', 28934], ['ATI MOBILITY FireGL V7200', 28931], ['ATI MOBILITY RADEON', 19545], ['ATI MOBILITY RADEON 7500', 19543], ['ATI MOBILITY RADEON 9500', 20050], ['ATI MOBILITY RADEON 9550', 20054], ['ATI MOBILITY RADEON 9600/9700 Series', 20048], ['ATI MOBILITY RADEON 9800', 19022], ['ATI Mobility Radeon HD 2300', 29200], ['ATI Mobility Radeon HD 2300 ', 29201], ['ATI Mobility Radeon HD 2400', 38089], ['ATI Mobility Radeon HD 2400 XT', 38088], [1, 'ATI Mobility Radeon HD 2600', 38273], [1, 'ATI Mobility Radeon HD 2600 XT', 38275], [1, 'ATI Mobility Radeon HD 3400 Series', 38340], [1, 'ATI Mobility Radeon HD 3430', 38338], [1, 'ATI Mobility Radeon HD 3650', 38289], [1, 'ATI Mobility Radeon HD 3670', 38291], [1, 'ATI Mobility Radeon HD 3850', 38148], [1, 'ATI Mobility Radeon HD 3850 X2', 38150], [1, 'ATI Mobility Radeon HD 3870', 38152], [1, 'ATI Mobility Radeon HD 3870 X2', 38153], ['ATI Mobility Radeon X1300', 29002], ['ATI Mobility Radeon X1300 ', 29001], ['ATI Mobility Radeon X1300 ', 29003], ['ATI Mobility Radeon X1300 ', 29004], ['ATI Mobility Radeon X1350', 29067], ['ATI Mobility Radeon X1350 ', 29068], ['ATI Mobility Radeon 
X1350 ', 29078], ['ATI Mobility Radeon X1400', 28997], ['ATI Mobility Radeon X1450', 29062], ['ATI Mobility Radeon X1450 ', 29069], ['ATI Mobility Radeon X1600', 29125], ['ATI Mobility Radeon X1700', 29141], ['ATI Mobility Radeon X1700 ', 29150], ['ATI Mobility Radeon X1700 XT', 29142], [1, 'ATI Mobility Radeon X1800', 28930], [1, 'ATI Mobility Radeon X1800 XT', 28929], [1, 'ATI Mobility Radeon X1900', 29316], [1, 'ATI Mobility Radeon X2300', 29066], [1, 'ATI Mobility Radeon X2300 ', 29064], ['ATI MOBILITY RADEON X300', 21601], ['ATI MOBILITY RADEON X300 ', 21600], ['ATI MOBILITY RADEON X300 ', 12626], ['ATI MOBILITY RADEON X600', 12624], ['ATI MOBILITY RADEON X600 SE', 21602], ['ATI MOBILITY RADEON X700', 22098], ['ATI MOBILITY RADEON X700 ', 22099], ['ATI MOBILITY RADEON X700 Secondary', 22131], [1, 'ATI MOBILITY RADEON X800', 23882], [1, 'ATI MOBILITY RADEON X800 XT', 23880], [1, 'ATI Mobility Radeon HD 2600 XT Gemini', 38283], [1, 'ATI Radeon 2100', 31086], [1, 'ATI Radeon 3100 Graphics', 38417], [1, 'ATI Radeon 3100 Graphics', 38419], ['ATI Radeon 9550/X1050 Series', 16723], ['ATI Radeon 9550/X1050 Series Secondary', 16755], ['ATI RADEON 9600 Series', 16720], ['ATI RADEON 9600 Series ', 20049], ['ATI RADEON 9600 Series ', 16721], ['ATI RADEON 9600 Series ', 16725], ['ATI RADEON 9600 Series ', 16722], ['ATI RADEON 9600 Series Secondary', 20081], ['ATI RADEON 9600 Series Secondary ', 16753], ['ATI RADEON 9600 Series Secondary ', 16752], ['ATI RADEON 9600 Series Secondary ', 16757], ['ATI RADEON 9600 Series Secondary ', 16754], ['ATI Radeon E2400', 38091], ['ATI Radeon HD 2350', 38087], ['ATI Radeon HD 2400', 38092], ['ATI Radeon HD 2400 LE', 38085], ['ATI Radeon HD 2400 PRO', 38083], ['ATI Radeon HD 2400 PRO AGP', 38084], ['ATI Radeon HD 2400 XT', 38081], [1, 'ATI Radeon HD 2600 LE', 38286], [1, 'ATI Radeon HD 2600 Pro', 38281], [1, 'ATI Radeon HD 2600 Pro AGP', 38279], [1, 'ATI Radeon HD 2600 XT', 38280], [1, 'ATI Radeon HD 2600 XT AGP', 38278], [1, 'ATI Radeon HD 2900 GT', 9405], [1, 'ATI Radeon HD 2900 PRO', 37891], [1, 'ATI Radeon HD 2900 XT', 37888], [1, 'ATI Radeon HD 2900 XT ', 37889], [1, 'ATI Radeon HD 2900 XT ', 37890], [1, 'ATI Radeon HD 3200 Graphics', 38416], [1, 'ATI Radeon HD 3200 Graphics', 38418], [1, 'ATI Radeon HD 3300 Graphics', 38420], [1, 'ATI Radeon HD 3430', 38343], [1, 'ATI Radeon HD 3450', 38341], [1, 'ATI Radeon HD 3470', 38336], [1, 'ATI Radeon HD 3600 Series', 38288], [1, 'ATI Radeon HD 3600 Series', 38295], [1, 'ATI Radeon HD 3600 Series', 38296], [1, 'ATI Radeon HD 3600 Series', 38297], [1, 'ATI Radeon HD 3650 AGP', 38294], [1, 'ATI Radeon HD 3830', 38151], [1, 'ATI Radeon HD 3850', 38149], [1, 'ATI Radeon HD 3850 AGP', 38165], [1, 'ATI Radeon HD 3850 X2', 38163], [1, 'ATI Radeon HD 3870', 38145], [1, 'ATI Radeon HD 3870 X2', 38159], ['ATI Radeon X1200 Series', 31006], ['ATI Radeon X1200 Series ', 31007], [1, 'Radeon X1950 XTX Uber - Limited Edition', 29256], [1, 'Radeon X1950 XTX Uber - Limited Edition Secondary', 29288], [1, 'ATI Radeon X1950 GT', 29320], [1, 'ATI Radeon X1950 GT Secondary', 29352], [1, 'ATI Radeon X800 GT', 23886], [1, 'ATI RADEON X800 GT', 21838], [1, 'ATI Radeon X800 GT Secondary', 23918], [1, 'ATI RADEON X800 GT Secondary', 21870], [1, 'ATI RADEON X800 XL', 21837], [1, 'ATI RADEON X800 XL Secondary', 21869], [1, 'ATI RADEON X850 PRO', 19275], [1, 'ATI RADEON X850 PRO Secondary', 19307], [1, 'ATI RADEON X850 SE', 19274], [1, 'ATI RADEON X850 SE Secondary', 19306], [1, 'ATI RADEON X850 XT', 19273], [1, 'ATI RADEON X850 XT Platinum 
Edition', 19276], [1, 'ATI RADEON X850 XT Platinum Edition Secondary', 19308], [1, 'Radeon X800 CrossFire Edition', 21837], [1, 'Radeon X800 CrossFire Edition Secondary', 21869], [1, 'Radeon X850 CrossFire Edition', 23890], [1, 'Radeon X850 CrossFire Edition Secondary', 23922], ['Radeon X550/X700 Series', 22095], ['ATI Radeon X700 Series Secondary', 22127], [1, 'ATI RADEON X850 XT Secondary', 19305], ['ATI Radeon Xpress 1200 Series', 31039], ['ATI Radeon Xpress 1200 Series ', 31041], ['ATI Radeon Xpress 1200 Series ', 31042], [0, 'ATI Radeon Xpress Series', 23137], [0, 'ATI Radeon Xpress Series ', 23139], [0, 'ATI Radeon Xpress Series ', 23138], [0, 'ATI Radeon Xpress Series ', 23105], [0, 'ATI Radeon Xpress Series ', 23107], [0, 'ATI Radeon Xpress Series ', 23106], [0, 'ATI Radeon Xpress Series ', 22868], [0, 'ATI Radeon Xpress Series ', 22612], [0, 'ATI Radeon Xpress Series ', 22869], [0, 'ATI Radeon Xpress Series ', 22900], [0, 'ATI Radeon Xpress Series ', 22644], [0, 'ATI Radeon Xpress Series ', 22901], ['Radeon 9500', 16708], ['Radeon 9500 ', 16713], ['Radeon 9500 PRO / 9700', 20037], ['Radeon 9500 PRO / 9700 Secondary', 20069], ['Radeon 9500 Secondary', 16740], ['Radeon 9500 Secondary ', 16745], ['Radeon 9600 TX', 20038], ['Radeon 9600 TX Secondary', 20070], ['Radeon 9600TX', 16710], ['Radeon 9600TX Secondary', 16742], ['Radeon 9700 PRO', 20036], ['Radeon 9700 PRO Secondary', 20068], ['Radeon 9800', 20041], ['Radeon 9800 PRO', 20040], ['Radeon 9800 PRO Secondary', 20072], ['Radeon 9800 SE', 16712], ['Radeon 9800 SE Secondary', 16744], ['Radeon 9800 Secondary', 20073], ['Radeon 9800 XT', 20042], ['Radeon 9800 XT Secondary', 20074], ['Radeon X1300 / X1550 Series', 28998], ['Radeon X1300 / X1550 Series Secondary', 29030], ['Radeon X1300 Series', 29006], ['Radeon X1300 Series ', 29022], ['Radeon X1300 Series ', 29005], ['Radeon X1300 Series ', 29123], ['Radeon X1300 Series ', 29071], ['Radeon X1300 Series Secondary', 29038], ['Radeon X1300 Series Secondary ', 29054], ['Radeon X1300 Series Secondary ', 29037], ['Radeon X1300 Series Secondary ', 29155], ['Radeon X1300 Series Secondary ', 29103], ['Radeon X1300/X1550 Series', 28994], ['Radeon X1300/X1550 Series ', 29056], ['Radeon X1300/X1550 Series ', 29059], ['Radeon X1300/X1550 Series ', 29063], ['Radeon X1300/X1550 Series Secondary', 29026], ['Radeon X1300/X1550 Series Secondary ', 29088], ['Radeon X1300/X1550 Series Secondary ', 29091], ['Radeon X1300/X1550 Series Secondary ', 29095], ['Radeon X1550 64-bit', 28999], ['Radeon X1550 64-bit ', 29023], ['Radeon X1550 64-bit ', 29087], ['Radeon X1550 64-bit Secondary', 29031], ['Radeon X1550 64-bit Secondary ', 29055], ['Radeon X1550 Series', 28995], ['Radeon X1550 Series ', 29075], ['Radeon X1550 Series Secondary', 29027], ['Radeon X1550 Series Secondary ', 29107], ['Radeon X1600 Pro / Radeon X1300 XT', 29134], ['Radeon X1600 Pro / Radeon X1300 XT Secondary', 29166], ['Radeon X1600 Series', 28992], ['Radeon X1600 Series ', 29120], ['Radeon X1600 Series ', 29122], ['Radeon X1600 Series ', 29126], ['Radeon X1600 Series ', 29057], ['Radeon X1600 Series ', 29133], ['Radeon X1600 Series Secondary', 29024], ['Radeon X1600 Series Secondary ', 29154], ['Radeon X1600 Series Secondary ', 29158], ['Radeon X1600 Series Secondary ', 29089], ['Radeon X1600 Series Secondary ', 29165], ['Radeon X1600 Series Secondary ', 29152], ['Radeon X1650 Series', 29121], ['Radeon X1650 Series ', 29331], ['Radeon X1650 Series ', 29329], ['Radeon X1650 Series ', 29127], ['Radeon X1650 Series Secondary', 29153], 
['Radeon X1650 Series Secondary ', 29363], ['Radeon X1650 Series Secondary ', 29361], ['Radeon X1650 Series Secondary ', 29159], [1, 'Radeon X1800 Series', 28928], [1, 'Radeon X1800 Series ', 28936], [1, 'Radeon X1800 CrossFire Edition', 28937], [1, 'Radeon X1800 Series ', 28938], [1, 'Radeon X1800 Series ', 28939], [1, 'Radeon X1800 Series ', 28940], [1, 'Radeon X1800 Series Secondary', 28960], [1, 'Radeon X1800 Series Secondary ', 28968], [1, 'Radeon X1800 CrossFire Edition Secondary', 28969], [1, 'Radeon X1800 Series Secondary ', 28970], [1, 'Radeon X1800 Series Secondary ', 28971], [1, 'Radeon X1800 Series Secondary ', 28972], [1, 'Radeon X1900 Series', 29251], [1, 'Radeon X1900 Series ', 29253], [1, 'Radeon X1900 Series ', 29254], [1, 'Radeon X1900 Series ', 29255], [1, 'Radeon X1900 CrossFire Edition', 29257], [1, 'Radeon X1900 Series ', 29258], [1, 'Radeon X1900 Series ', 29259], [1, 'Radeon X1900 Series ', 29260], [1, 'Radeon X1900 Series ', 29261], [1, 'Radeon X1900 Series ', 29263], [1, 'Radeon X1900 Series Secondary', 29283], [1, 'Radeon X1900 Series Secondary ', 29285], [1, 'Radeon X1900 Series Secondary ', 29286], [1, 'Radeon X1900 Series Secondary ', 29287], [1, 'Radeon X1900 CrossFire Edition Secondary', 29289], [1, 'Radeon X1900 Series Secondary ', 29290], [1, 'Radeon X1900 Series Secondary ', 29291], [1, 'Radeon X1900 Series Secondary ', 29292], [1, 'Radeon X1900 Series Secondary ', 29293], [1, 'Radeon X1900 Series Secondary ', 29295], [1, 'Radeon X1950 Series', 29312], [1, 'Radeon X1950 CrossFire Edition', 29248], [1, 'Radeon X1950 Series ', 29252], [1, 'Radeon X1950 Series Secondary', 29344], [1, 'Radeon X1950 CrossFire Edition Secondary', 29280], [1, 'Radeon X1950 Series Secondary ', 29284], ['Radeon X300/X550/X1050 Series', 23392], ['Radeon X300/X550/X1050 Series ', 23395], ['Radeon X300/X550/X1050 Series Secondary', 23411], ['Radeon X300/X550/X1050 Series Secondary ', 23408], ['Radeon X550/X700 Series ', 22103], ['Radeon X550/X700 Series Secondary', 22135], ['Radeon X600 Series', 23394], ['Radeon X600 Series Secondary', 23410], ['Radeon X600/X550 Series', 15952], ['Radeon X600/X550 Series Secondary', 15984], ['Radeon X700', 24141], ['Radeon X700 PRO', 24139], ['Radeon X700 PRO Secondary', 24171], ['Radeon X700 SE', 24140], ['Radeon X700 SE Secondary', 24172], ['Radeon X700 Secondary', 24173], ['Radeon X700 XT', 24138], ['Radeon X700 XT Secondary', 24170], ['Radeon X700/X550 Series', 24143], ['Radeon X700/X550 Series Secondary', 24175], [1, 'Radeon X800 GT', 21835], [1, 'Radeon X800 GT Secondary', 21867], [1, 'Radeon X800 GTO', 21833], [1, 'Radeon X800 GTO ', 21839], [1, 'Radeon X800 GTO ', 23887], [1, 'Radeon X800 GTO Secondary', 21865], [1, 'Radeon X800 GTO Secondary ', 21871], [1, 'Radeon X800 GTO Secondary ', 23919], [1, 'Radeon X800 PRO', 19017], [1, 'Radeon X800 PRO Secondary', 19049], [1, 'Radeon X800 SE', 19023], [1, 'Radeon X800 SE Secondary', 19055], [1, 'Radeon X800 Series', 19016], [1, 'Radeon X800 Series ', 19018], [1, 'Radeon X800 Series ', 19020], [1, 'Radeon X800 Series ', 21832], [1, 'Radeon X800 Series Secondary', 19048], [1, 'Radeon X800 Series Secondary ', 19050], [1, 'Radeon X800 Series Secondary ', 19052], [1, 'Radeon X800 Series Secondary ', 21864], [1, 'Radeon X800 VE', 19028], [1, 'Radeon X800 VE Secondary', 19060], [1, 'Radeon X800 XT', 19019], [1, 'Radeon X800 XT ', 23895], [1, 'Radeon X800 XT Platinum Edition', 19024], [1, 'Radeon X800 XT Platinum Edition ', 21834], [1, 'Radeon X800 XT Platinum Edition Secondary', 19056], [1, 'Radeon X800 XT 
Platinum Edition Secondary ', 21866], [1, 'Radeon X800 XT Secondary', 19051], [1, 'Radeon X800 XT Secondary ', 23927], [1, 'Radeon X850 XT', 23890], [1, 'Radeon X850 XT Platinum Edition', 23885], [1, 'Radeon X850 XT Platinum Edition Secondary', 23917], [1, 'Radeon X850 XT Secondary', 23922]]
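# Each ATI entry is either [name, device_id] or [flag, name, device_id]; a leading flag of 1 marks a
# shader-capable card (sets self.shader below) and a flag of 0 marks a known low-end card.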
length = len(ati_device_list)
for i in range(length):
entry = ati_device_list[i]
if entry[0] == 1:
    if device_id == entry[2]:
        self.shader = 1
        self.output('device_name* ', entry[1])
        break
elif entry[0] == 0:
    if device_id == entry[2]:
        low_end_card = 1
        self.output('device_name* ', entry[1])
        break
else:
    if device_id == entry[1]:
        self.output('device_name ', entry[0])
        break
if vendor_id == 4318:
nvidia_device_list = [
[
0, 45, 'NV5 TNT2 Model 64 / TNT2 Model 64 Pro'], [0, 272, 'GeForce2 MX/MX 400'], [0, 273, 'GeForce2 MX200'], [0, 272, 'GeForce2 MX/MX 400'], [0, 370, 'GeForce4 MX 420'], [0, 373, 'NV17M GeForce4 420 Go'], [0, 978, 'NVIDIA GeForce 6100 nForce 400'], [0, 977, 'NVIDIA GeForce 6100 nForce 405'], [0, 976, 'NVIDIA GeForce 6100 nForce 430'], [0, 578, 'NVIDIA GeForce 6100'], [0, 576, 'GeForce 6150'], [0, 577, 'NVIDIA GeForce 6150 LE'], [0, 581, 'NVIDIA Quadro NVS 210S / NVIDIA GeForce 6150LE'], [335, 'GeForce 6200'], [243, 'GeForce 6200'], [545, 'GeForce 6200 '], [546, 'GeForce 6200 A-LE'], [355, 'GeForce 6200 LE'], [354, 'GeForce 6200SE TurboCache(TM)'], [353, 'GeForce 6200 TurboCache(TM)'], [361, 'GeForce 6250'], [352, 'GeForce 6500'], [1, 321, 'GeForce 6600'], [1, 242, 'GeForce 6600'], [1, 320, 'GeForce 6600 GT'], [1, 241, 'GeForce 6600 GT'], [1, 322, 'GeForce 6600 LE'], [1, 244, 'GeForce 6600 LE'], [1, 323, 'GeForce 6600 VE'], [1, 325, 'GeForce 6610 XL'], [1, 327, 'GeForce 6700 XL'], [1, 65, 'GeForce 6800'], [1, 193, 'GeForce 6800 '], [1, 529, 'GeForce 6800 '], [1, 71, 'GeForce 6800 GS'], [1, 246, 'GeForce 6800 GS'], [1, 192, 'GeForce 6800 GS'], [1, 69, 'GeForce 6800 GT'], [1, 70, 'GeForce 6800 GT '], [1, 533, 'GeForce 6800 GT '], [1, 249, 'GeForce 6800 Series GPU'], [1, 66, 'GeForce 6800 LE'], [1, 194, 'GeForce 6800 LE '], [1, 530, 'GeForce 6800 LE '], [1, 64, 'GeForce 6800 Ultra'], [1, 249, 'GeForce 6800 Series GPU'], [1, 67, 'GeForce 6800 XE'], [1, 68, 'GeForce 6800 XT'], [1, 72, 'GeForce 6800 XT'], [1, 195, 'GeForce 6800 XT '], [1, 536, 'GeForce 6800 XT '], [1338, 'GeForce 7050 PV / NVIDIA nForce 630a'], [1339, 'GeForce 7050 PV / NVIDIA nForce 630a '], [1342, 'GeForce 7025 / NVIDIA nForce 630a'], [2016, 'GeForce 7150 / NVIDIA nForce 630i'], [2017, 'GeForce 7100 / NVIDIA nForce 630i'], [2018, 'GeForce 7050 / NVIDIA nForce 630i'], [2019, 'GeForce 7050 / NVIDIA nForce 610i'], [2021, 'GeForce 7050 / NVIDIA nForce 620i'], [362, 'GeForce 7100 GS'], [479, 'GeForce 7300 GS'], [915, 'GeForce 7300 GT '], [917, 'GeForce 7300 GT '], [738, 'GeForce 7300 GT'], [465, 'GeForce 7300 LE'], [467, 'GeForce 7300 SE/7200 GS'], [464, 'GeForce 7350 LE'], [477, 'GeForce 7500 LE'], [1, 914, 'GeForce 7600 GS '], [1, 737, 'GeForce 7600 GS'], [1, 913, 'GeForce 7600 GT'], [1, 736, 'GeForce 7600 GT'], [1, 916, 'GeForce 7600 LE'], [1, 912, 'GeForce 7650 GS'], [1, 245, 'GeForce 7800 GS'], [1, 147, 'GeForce 7800 GS '], [1, 146, 'GeForce 7800 GT'], [1, 144, 'GeForce 7800 GTX'], [1, 145, 'GeForce 7800 GTX'], [1, 149, 'GeForce 7800 SLI'], [1, 739, 'GeForce 7900 GS'], [1, 658, 'GeForce 7900 GS '], [1, 657, 'GeForce 7900 GT/GTO'], [1, 656, 'GeForce 7900 GTX'], [1, 659, 'GeForce 7900 GX2'], [1, 740, 'GeForce 7950 GT'], [1, 660, 'GeForce 7950 GX2 '], [1, 661, 'NVIDIA GeForce 7950 GT '], [2121, 'GeForce 8200'], [2123, 'GeForce 8200 '], [2120, 'GeForce 8300'], [1059, 'NVIDIA GeForce 8300 GS'], [1762, 'GeForce 8400'], [1056, 'GeForce 8400 SE'], [1028, 'GeForce 8400 GS'], [1058, 'NVIDIA GeForce 8400 GS '], [1060, 'GeForce 8400 GS '], [1764, 'GeForce 8400 GS '], [1057, 'NVIDIA GeForce 8500 GT'], [1, 1024, 'NVIDIA GeForce 8600 GTS'], [1, 1025, 'GeForce 8600 GT'], [1, 1026, 'NVIDIA GeForce 8600 GT'], [1, 1027, 'GeForce 8600GS'], [1, 401, 'NVIDIA GeForce 8800 GTX'], [1, 403, 'NVIDIA GeForce 8800 GTS'], [1, 404, 'GeForce 8800 Ultra'], [1, 1536, 'GeForce 8800 GTS 512'], [1, 1538, 'GeForce 8800 GT'], [1, 1553, 'GeForce 8800 GT '], [1, 1542, 'GeForce 8800 GS'], [1, 1552, 'GeForce 9600 GSO'], [1, 1570, 'GeForce 9600 GT'], [1, 1540, 'GeForce 
9800 GX2'], [1, 1554, 'GeForce 9800 GTX'], [2127, 'GeForce 8100 / nForce 720a'], [2122, 'nForce 730a'], [2125, 'nForce 750a SLI'], [2124, 'nForce 780a SLI'], [802, 'GeForce FX 5200'], [801, 'GeForce FX 5200 Ultra'], [803, 'GeForce FX 5200LE'], [806, 'GeForce FX 5500'], [806, 'GeForce FX 5500'], [786, 'GeForce FX 5600'], [785, 'GeForce FX 5600 Ultra'], [788, 'GeForce FX 5600XT'], [834, 'GeForce FX 5700'], [833, 'GeForce FX 5700 Ultra'], [835, 'GeForce FX 5700LE'], [836, 'GeForce FX 5700VE'], [770, 'GeForce FX 5800'], [769, 'GeForce FX 5800 Ultra'], [817, 'GeForce FX 5900'], [816, 'GeForce FX 5900 Ultra'], [819, 'GeForce FX 5950 Ultra'], [804, 'GeForce FX Go5200 64M'], [794, 'GeForce FX Go5600'], [839, 'GeForce FX Go5700'], [359, 'GeForce Go 6200/6400'], [360, 'GeForce Go 6200/6400'], [1, 328, 'GeForce Go 6600'], [1, 200, 'GeForce Go 6800'], [1, 201, 'GeForce Go 6800 Ultra'], [1, 152, 'GeForce Go 7800'], [1, 153, 'GeForce Go 7800 GTX'], [1, 664, 'GeForce Go 7900 GS'], [1, 665, 'GeForce Go 7900 GTX'], [389, 'GeForce MX 4000'], [250, 'GeForce PCX 5750'], [251, 'GeForce PCX 5900'], [512, 'GeForce3'], [513, 'GeForce3 Ti200'], [514, 'GeForce3 Ti500'], [369, 'GeForce4 MX 440'], [385, 'GeForce4 MX 440 with AGP8X'], [371, 'GeForce4 MX 440-SE'], [368, 'GeForce4 MX 460'], [595, 'GeForce4 Ti 4200'], [641, 'GeForce4 Ti 4200 with AGP8X'], [593, 'GeForce4 Ti 4400'], [592, 'GeForce4 Ti 4600'], [640, 'GeForce4 Ti 4800'], [642, 'GeForce4 Ti 4800SE'], [515, 'Quadro DCC'], [777, 'Quadro FX 1000'], [846, 'Quadro FX 1100'], [254, 'Quadro FX 1300'], [206, 'Quadro FX 1400'], [670, 'Quadro FX 1500'], [1039, 'Quadro FX 1700'], [776, 'Quadro FX 2000'], [824, 'Quadro FX 3000'], [253, 'Quadro PCI-E Series'], [1, 248, 'Quadro FX 3400/4400'], [1, 205, 'Quadro FX 3450/4000 SDI'], [1, 669, 'Quadro FX 3500'], [1, 1562, 'Quadro FX 3700'], [1, 78, 'Quadro FX 4000'], [1, 157, 'Quadro FX 4500'], [1, 671, 'Quadro FX 4500 X2'], [1, 414, 'Quadro FX 4600'], [1, 668, 'NVIDIA Quadro FX 5500'], [1, 413, 'Quadro FX 5600'], [478, 'Quadro FX 350'], [1034, 'Quadro FX 370'], [811, 'Quadro FX 500/FX 600'], [334, 'Quadro FX 540'], [332, 'Quadro FX 540 MXM'], [811, 'Quadro FX 500/FX 600'], [333, 'Quadro FX 550'], [926, 'Quadro FX 560'], [1038, 'Quadro FX 570'], [831, 'Quadro FX 700'], [844, 'Quadro FX Go1000'], [204, 'Quadro FX Go1400'], [796, 'Quadro FX Go700'], [394, 'Quadro NVS with AGP8X'], [810, 'Quadro NVS 280 PCI'], [253, 'Quadro PCI-E Series'], [357, 'Quadro NVS 285'], [1071, 'Quadro NVS 290'], [330, 'Quadro NVS 440'], [378, 'Quadro NVS'], [394, 'Quadro NVS with AGP8X'], [275, 'Quadro2 MXR/EX'], [378, 'Quadro NVS'], [395, 'Quadro4 380 XGL'], [376, 'Quadro4 550 XGL'], [392, 'Quadro4 580 XGL'], [603, 'Quadro4 700 XGL'], [601, 'Quadro4 750 XGL'], [600, 'Quadro4 900 XGL'], [648, 'Quadro4 980 XGL'], [652, 'Quadro4 Go700']]
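# NVIDIA entries are [device_id, name] or [flag, device_id, name]; flag 1 marks a shader-capable card,
# flag 0 a known low-end card, mirroring the ATI table above.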
length = len(nvidia_device_list)
for i in range(length):
entry = nvidia_device_list[i]
if entry[0] == 1:
    if device_id == entry[1]:
        self.shader = 1
        self.output('device_name* ', entry[2])
        break
elif entry[0] == 0:
    if device_id == entry[1]:
        low_end_card = 1
        self.output('device_name* ', entry[2])
        break
else:
    if device_id == entry[0]:
        self.output('device_name ', entry[1])
        break
if vendor_id == 32902:
intel_device_list = [
[
'Q35 Device 0 / X3000', 10674], ['Q35 Device 1 / X3000', 10675], ['G33/G31 Device 0 / 3000', 10690], ['G33/G31 Device 1 / 3000', 10691], ['Q33 Device 0', 10706], ['Q33 Device 1', 10707], ['Q965/Q963 Device 0', 10642], ['Q965/Q963 Device 1', 10643], ['GM965 Device 0 / X3100', 10754], ['GM965 Device 1 / X3100', 10755], ['G965 Device 0 / X3000', 10658], ['G965 Device 1 / X3000', 10659], ['946GZ Device 0', 10610], ['946GZ Device 1', 10611], ['945GM Device 0', 10146], ['945GM Device 1', 10150], ['945G Device 0', 10098], ['945G Device 1', 10102], ['915GM Device 0', 9618], ['915GM Device 1', 10130], ['915G Device 0', 9602], ['915G Device 1', 10114], ['865G', 9586], ['855GM', 13698], ['845G', 9570], ['830M', 13687]]
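# The Intel table above is never searched; any adapter from vendor 32902 (0x8086, Intel) is simply
# treated as a low-end card below.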
low_end_card = 1
if vendor_id == 21299:
low_end_card = 1
if vendor_id == 4153:
low_end_card = 1
if vendor_id == 4358:
low_end_card = 1
size = di.getVideoMemory()
if size == 0:
size = di.getTextureMemory()
mb = 1024 * 1024
if self.debug:
size = 32 * mb
test_size = base.config.GetInt('test-video-memory', 0)
if test_size > 0:
size = test_size * mb
self.textureCompression = 1
self.texture = Options.texture_maximum
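# Vendor 4318 is 0x10DE (NVIDIA) and 4098 is 0x1002 (ATI); texture compression is switched off for both.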
if vendor_id == 4318:
self.textureCompression = 0
if vendor_id == 4098:
self.textureCompression = 0
if size <= 0:
    self.texture = Options.texture_low
elif size <= 32 * mb:
    self.texture = Options.texture_low
elif size <= 64 * mb:
    self.texture = Options.texture_medium
elif size <= 96 * mb:
    self.texture = Options.texture_medium
elif size <= 128 * mb:
    self.texture = Options.texture_high
# above 128 MB of video memory the default Options.texture_maximum set earlier is kept unchanged
scale = Options.texture_scale_low
block_size = 1024 * 1024 * 256
physical_memory = di.getPhysicalMemory()
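# Round physical RAM to the nearest 256 MB block; the block count drives texture scale and detail levels below.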
if physical_memory > 0:
blocks = (physical_memory + block_size / 2) / block_size
if blocks <= 1:
    scale = Options.texture_scale_low
    self.special_effects = Options.SpecialEffectsLow
    self.character_detail_level = Options.option_low
    self.terrain_detail_level = Options.option_low
    self.memory = 1
elif blocks == 2:
    scale = Options.texture_scale_low
    self.special_effects = Options.SpecialEffectsLow
    self.character_detail_level = Options.option_low
    self.terrain_detail_level = Options.option_low
    self.memory = 1
elif blocks == 3:
    scale = Options.texture_scale_medium
    self.special_effects = Options.SpecialEffectsMedium
    self.character_detail_level = Options.option_medium
    self.terrain_detail_level = Options.option_medium
elif blocks == 4:
    if self.shader:
        scale = Options.texture_scale_medium
        self.special_effects = Options.SpecialEffectsMedium
        self.character_detail_level = Options.option_high
        self.terrain_detail_level = Options.option_high
    else:
        scale = Options.texture_scale_medium
        self.special_effects = Options.SpecialEffectsMedium
        self.character_detail_level = Options.option_medium
        self.terrain_detail_level = Options.option_medium
elif blocks == 5:
    if self.shader:
        scale = Options.texture_scale_high
        self.special_effects = Options.SpecialEffectsHigh
        self.character_detail_level = Options.option_high
        self.terrain_detail_level = Options.option_high
    else:
        scale = Options.texture_scale_medium
        self.special_effects = Options.SpecialEffectsMedium
        self.character_detail_level = Options.option_medium
        self.terrain_detail_level = Options.option_medium
else:
    if self.shader:
        scale = Options.texture_scale_high
        self.special_effects = Options.SpecialEffectsHigh
        self.character_detail_level = Options.option_high
        self.terrain_detail_level = Options.option_high
    else:
        scale = Options.texture_scale_medium
        self.special_effects = Options.SpecialEffectsMedium
        self.character_detail_level = Options.option_medium
        self.terrain_detail_level = Options.option_medium
if self.memory:
self.textureCompression = 1
low_end_card = base.config.GetInt('low-end-card', low_end_card)
if low_end_card:
self.output('info ', 'test-low-end card')
scale = Options.texture_scale_low
self.special_effects = Options.SpecialEffectsLow
self.character_detail_level = Options.option_low
self.terrain_detail_level = Options.option_low
self.texture_scale = scale
high_end_card = base.config.GetInt('test-high-end-card', self.shader)
if high_end_card:
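# A high-end (shader-capable) card defaults to 1024x768 at 32 bpp when the display reports that mode
# as supported and the window fits the desktop.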
width = 1024
height = 768
bits_per_pixel = 32
supported = False
total_display_modes = di.getTotalDisplayModes()
if total_display_modes > 0:
index = 0
while index < total_display_modes:
if di.getDisplayModeBitsPerPixel(index) == bits_per_pixel:
if width == di.getDisplayModeWidth(index) and height == di.getDisplayModeHeight(index):
supported = True
break
index += 1
if supported:
if width <= di.getMaximumWindowWidth() and height <= di.getMaximumWindowHeight():
self.window_width = width
self.window_height = height
self.fullscreen_width = width
self.fullscreen_height = height
if self.recommendOptionsBasedOnData:
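# options_from_data is an 18-character string of letter/digit pairs; the digit positions are decoded
# below into reflection, shader, shadow, texture scale divisor, texture compression, special effects,
# character detail, terrain detail and memory settings.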
def validate_gameOptions(options):
if len(options) != 18:
return False
if int(options[1:2]) not in [0, 1, 2]:
return False
if int(options[3:4]) not in [0, 1]:
return False
if int(options[5:6]) not in [0, 1]:
return False
if int(options[7:8]) not in [1, 2, 4]:
return False
if int(options[9:10]) not in [0, 1]:
return False
if int(options[11:12]) not in [0, 1, 2]:
return False
if int(options[13:14]) not in [0, 1, 2]:
return False
if int(options[15:16]) not in [0, 1, 2]:
return False
if int(options[17:18]) not in [0, 1]:
return False
return True
system_key = (
'0x%04x' % di.getVendorId(), '0x%04x' % di.getDeviceId(), '%s.%d.%d.%d' % (os.name, di.getOsPlatformId(), di.getOsVersionMajor(), di.getOsVersionMinor()))
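# Example key (hypothetical values): ('0x10de', '0x0191', 'nt.2.6.0'); a matching key selects a pre-tuned options string.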
if GameOptionsMatrix.GameOptionsMatrix.has_key(system_key):
options_from_data = GameOptionsMatrix.GameOptionsMatrix[system_key][0]
if validate_gameOptions(options_from_data):
self.reflection = int(options_from_data[1:2])
self.shader = int(options_from_data[3:4])
self.shadow = int(options_from_data[5:6])
self.texture_scale = 1.0 / int(options_from_data[7:8])
self.textureCompression = int(options_from_data[9:10])
self.special_effects = int(options_from_data[11:12])
self.character_detail_level = int(options_from_data[13:14])
self.terrain_detail_level = int(options_from_data[15:16])
self.memory = int(options_from_data[17:18])
else:
self.textureCompression = 1
self.texture = Options.texture_medium
self.simplify()
self.use_stereo = 0
if not realtime:
self.runtime()
return
def simplify(self):
if self.shader:
if self.special_effects == Options.SpecialEffectsHigh:
self.simple_display_option = self.character_detail_level == Options.option_high and self.terrain_detail_level == Options.option_high and self.texture_scale == Options.texture_scale_high and 2
elif (self.special_effects == Options.SpecialEffectsLow
        or self.character_detail_level == Options.option_low
        or self.terrain_detail_level == Options.option_low
        or self.texture_scale == Options.texture_scale_low):
    self.simple_display_option = 0
else:
    self.simple_display_option = 1
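# Map the collapsed simple_display_option value back onto the concrete detail settings.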
if self.simple_display_option == 2:
    self.character_detail_level = Options.option_high
    self.terrain_detail_level = Options.option_high
    self.reflection = Options.option_high
    self.special_effects = Options.SpecialEffectsHigh
    self.texture_scale = Options.texture_scale_high
    self.texture = Options.texture_high
    self.shader = 1
    self.shadow = 1
else:
if self.simple_display_option == 1:
self.character_detail_level = Options.option_medium
self.terrain_detail_level = Options.option_medium
self.reflection = Options.option_medium
self.special_effects = Options.SpecialEffectsMedium
self.texture_scale = Options.texture_scale_medium
self.texture = Options.texture_medium
self.shader = 0
self.shadow = 0
else:
self.character_detail_level = Options.option_low
self.terrain_detail_level = Options.option_low
self.reflection = Options.option_low
self.special_effects = Options.SpecialEffectsLow
self.texture_scale = Options.texture_scale_low
self.texture = Options.texture_low
self.shader = 0
self.shadow = 0
def setInvasion(self, invasionOn):
self.invasionOn = invasionOn
def getCharacterDetailSetting(self):
if self.invasionOn:
return 0
return self.character_detail_level
def getTerrainDetailSetting(self):
if self.invasionOn:
return 0
return self.terrain_detail_level
def getSpecialEffectsSetting(self):
if self.invasionOn:
return 0
return self.special_effects
def setRuntimeSpecialEffects(self):
if hasattr(base, 'localAvatar'):
gamearea = localAvatar.getParentObj()
from pirates.world.DistributedGameArea import DistributedGameArea
if isinstance(gamearea, DistributedGameArea):
if gamearea.envEffects:
gamearea.envEffects.unloadEffects()
gamearea.envEffects.loadEffects()
if self.special_effects == Options.SpecialEffectsLow or self.invasionOn:
    MotionTrail.setGlobalEnable(False)
elif self.special_effects == Options.SpecialEffectsMedium:
    MotionTrail.setGlobalEnable(True)
elif self.special_effects == Options.SpecialEffectsHigh:
    MotionTrail.setGlobalEnable(True)
def setRuntimeStereo(self):
if self.use_stereo:
if not base.stereoEnabled:
base.toggleStereo()
else:
if base.stereoEnabled:
base.toggleStereo()
def setLandMapRadarAxis(self):
messenger.send('landMapRadarAxisChanged', [self.land_map_radar_axis])
def getLandMapRadarAxis(self):
return self.land_map_radar_axis
def setOceanMapRadarAxis(self):
messenger.send('oceanMapRadarAxisChanged', [self.ocean_map_radar_axis])
def getOceanMapRadarAxis(self):
return self.ocean_map_radar_axis
def output(self, token, value):
self.notify.info(token + '= ' + value.__repr__())
def log(self, message=None):
if message:
self.notify.info(message)
self.output('version ', self.version)
self.output('state ', self.state)
self.output('api ', self.api)
self.output('window_width ', self.window_width)
self.output('window_height ', self.window_height)
self.output('fullscreen_width ', self.fullscreen_width)
self.output('fullscreen_height ', self.fullscreen_height)
self.output('resolution ', self.resolution)
self.output('embedded ', self.embedded)
self.output('fullscreen ', self.fullscreen)
self.output('widescreen ', self.widescreen)
self.output('widescreen_resolution ', self.widescreen_resolution)
self.output('widescreen_fullscreen ', self.widescreen_fullscreen)
self.output('reflection ', self.reflection)
self.output('shader ', self.shader)
self.output('shadow ', self.shadow)
self.output('texture ', self.texture)
self.output('texture_compression ', self.textureCompression)
self.output('sound ', self.sound)
self.output('sound_volume ', self.sound_volume)
self.output('music ', self.music)
self.output('music_volume ', self.music_volume)
self.output('first_mate_voice ', self.first_mate_voice)
self.output('gui_scale ', self.gui_scale)
self.output('chatbox_scale ', self.chatbox_scale)
self.output('special_effects ', self.special_effects)
self.output('texture_scale ', self.texture_scale)
self.output('character_detail_level ', self.character_detail_level)
self.output('terrain_detail_level ', self.terrain_detail_level)
self.output('memory ', self.memory)
self.output('mouse_look ', self.mouse_look)
self.output('ship_look ', self.ship_look)
self.output('gamma ', self.gamma - self.gamma_save_offset)
self.output('gamma_enable ', self.gamma_enable)
self.output('cpu_frequency_warning ', self.cpu_frequency_warning)
self.output('hdr ', self.hdr)
self.output('hdr_factor ', self.hdr_factor)
self.output('ship_visibility_from_islands ', self.ocean_visibility)
self.output('land_map_radar_axis', self.land_map_radar_axis)
self.output('ocean_map_radar_axis', self.ocean_map_radar_axis)
self.output('simple_display_option', self.simple_display_option)
self.output('use_stereo', self.use_stereo)
scale = self.texture_scale
if scale <= 0.0:
scale = 1.0
scale = 1.0 / scale
if self.embedded:
x = base.appRunner and base.appRunner.windowProperties and base.appRunner.windowProperties.getXSize()
y = base.appRunner.windowProperties.getYSize()
elif self.fullscreen:
    x = self.fullscreen_width
    y = self.fullscreen_height
else:
    x = self.window_width
    y = self.window_height
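# Pack the effective settings into a compact letter/value code string and publish it on base.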
gameOptionsCode = 'r%ds%ds%dt%dc%de%dc%dt%dm%de%df%dx%dy%ds%d' % (self.reflection, self.shader, self.shadow, scale, self.textureCompression, self.special_effects, self.character_detail_level, self.terrain_detail_level, self.memory, self.embedded, self.fullscreen, x, y, self.use_stereo)
base.gameOptionsCode = gameOptionsCode
def compareNumbers(self, x1, y1, z1, w1, x2, y2, z2, w2):
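# Lexicographic comparison of two 4-component values; returns [delta, axis, dx, dy, dz, dw]
# where axis is the first differing component (1-4) or 0 when all components are equal.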
axis = 0
delta = 0
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
dw = w1 - w2
if dx != 0:
    axis = 1
    delta = dx
elif dy != 0:
    axis = 2
    delta = dy
elif dz != 0:
    axis = 3
    delta = dz
elif dw != 0:
    axis = 4
    delta = dw
return [
delta, axis, dx, dy, dz, dw]
def compareDates(self, year1, month1, day1, year2, month2, day2):
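# Approximate difference in days, using 365.25 days per year and 30.4375 days per month.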
delta_days = 0.0
state = self.compareNumbers(year1, month1, day1, 0, year2, month2, day2, 0)
if state[0] == 0:
pass
else:
delta_days = state[2] * 365.25 + state[3] * 30.4375 + state[4]
return delta_days
class KeyMappings(OptionSpace):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('KeyMappings')
def __init__(self):
self.startWatcher()
def startWatcher(self):
self.notify.debug('Starting key watcher')
base.buttonThrowers[0].node().setButtonDownEvent('GameOptions-buttonWatcher')
def destroy(self):
self.notify.debug('Stopping key watcher')
base.buttonThrowers[0].node().setButtonDownEvent('')
class GameOptions(BorderFrame):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('GameOptions')
debug = False
resolution_table = [(800, 600), (1024, 768), (1280, 1024), (1600, 1200)]
widescreen_resolution_table = [(1280, 720), (1920, 1080)]
MinimumHorizontalResolution = 800
MinimumVerticalResolution = 600
def __init__(self, title, x, y, width, height, options=None, file_path=None, pipe=None, access=0, chooser=0, keyMappings=None):
self.inAdFrame = False
self.width = width
self.height = height
self.chooser = chooser
self.enable_hdr = base.config.GetInt('want-game-options-hdr', 1)
self.enable_ship_visibility = base.config.GetInt('want-game-options-ship-visibility', 0)
if file_path:
self.file_path = file_path
else:
self.file_path = Options.DEFAULT_FILE_PATH
if base.config.GetBool('want-test-gameoptions', 0):
self.velvet = False
base.gameoptions = self
else:
if base.hasEmbedded:
self.velvet = embedded.isMainWindowVisible() or base.cr.isPaid() == OTPGlobals.AccessVelvetRope
else:
self.velvet = False
if launcher.getValue('GAME_SHOW_ADDS') == 'NO' or sys.platform == 'darwin' or sys.platform == 'linux2':
self.velvet = False
self.restartDialog = None
self.savedDialog = None
self.logoutDialog = None
self.noteOnChangeDialog = None
self.stereoOptionDialog = None
self.restore_options = None
self.current_options = None
if options:
self.options = options
else:
self.options = Options()
if self.options.load(self.file_path):
pass
else:
self.options = Options()
self.options.config_to_options()
self.options.recommendedOptions(base.pipe, True)
if keyMappings:
self.keyMappings = keyMappings
else:
self.keyMappings = KeyMappings()
self.options.options_to_config()
self.current_options = copy.copy(self.options)
self.shader_support = False
self.shader_model = GraphicsStateGuardian.SM00
if base.win and base.win.getGsg():
self.shader_model = base.win.getGsg().getShaderModel()
if self.shader_model >= GraphicsStateGuardian.SM11:
self.shader_support = True
self.play = False
try:
if base.cr.gameFSM.getCurrentState().getName() == 'playGame':
self.play = True
except:
pass
else:
if access == Freebooter.VELVET_ROPE:
self.freeLock = True
else:
self.freeLock = False
if launcher.getValue('GAME_SHOW_ADDS') == 'NO' or sys.platform == 'darwin' or sys.platform == 'linux2':
self.freeLock = False
if hasattr(base, 'localAvatar'):
if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
self.freeLock = True
BorderFrame.__init__(self, relief=None, state=DGG.NORMAL, frameColor=PiratesGuiGlobals.FrameColor, borderWidth=PiratesGuiGlobals.BorderWidth, pos=(x, 0.0, y), frameSize=(0, width, 0, height), sortOrder=20)
self.initialiseoptions(GameOptions)
self.gui = GameOptionsGui(self, title, x, y, width, height, options, file_path, pipe, access, chooser, keyMappings)
BorderFrame.hide(self)
return
def destroy(self):
if self.gui:
self.gui.destroy()
else:
self.ignoreAll()
self.delete_dialogs()
BorderFrame.destroy(self)
def get_pipe(self):
return base.pipe
def set_display(self, options, pipe, width, height):
success = options.display.set(options, pipe, width, height)
if success:
self.current_options = copy.copy(options)
else:
self.options = copy.copy(self.current_options)
self.set_options(False, True)
def fade_button(self, button):
if button:
button.setAlphaScale(self.not_selected_color)
button['text_fg'] = (1.0, 1.0, 1.0, 1.0)
button['selected'] = False
def highlight_button(self, button):
if button:
button.setAlphaScale(self.selected_color)
button['text_fg'] = (0.2, 0.8, 0.6, 1.0)
button['selected'] = True
def inactive_highlight_button(self, button):
if button:
button.setAlphaScale(self.selected_color)
button['text_fg'] = (0.1, 0.4, 0.3, 1.0)
def inactive_button(self, button):
if button:
button.setAlphaScale(self.selected_color)
button['text_fg'] = (0.2, 0.2, 0.2, 1.0)
def default_button_function(self):
self.options = Options()
self.options.recommendedOptions(self.get_pipe(), True)
self.set_options(True)
if hasattr(base, 'localAvatar') and base.localAvatar.isPopulated() and self.gui:
self.tutPanelOptions = [
0, 0, 0]
self.gui.setTutPanelOptions()
def restore_button_function(self):
if self.restore_options:
self.options = copy.copy(self.restore_options)
self.set_options(True)
if hasattr(base, 'localAvatar') and base.localAvatar.isPopulated() and self.gui:
self.tutPanelOptions = [
0, 0, 0]
self.setTutPanelOptions()
def delete_dialogs(self):
if self.restartDialog:
self.restartDialog.destroy()
del self.restartDialog
self.restartDialog = None
if self.savedDialog:
self.savedDialog.destroy()
del self.savedDialog
self.savedDialog = None
if self.logoutDialog:
self.logoutDialog.destroy()
del self.logoutDialog
self.logoutDialog = None
if self.noteOnChangeDialog:
self.noteOnChangeDialog.destroy()
del self.noteOnChangeDialog
self.noteOnChangeDialog = None
if self.stereoOptionDialog:
self.stereoOptionDialog.destroy()
del self.stereoOptionDialog
self.stereoOptionDialog = None
return
def display_restart_dialog(self):
self.delete_dialogs()
self.restartDialog = PDialog.PDialog(text=PLocalizer.GameOptionsApplicationRestartMessage, style=OTPDialog.Acknowledge, giveMouse=False, command=self.default_dialog_command)
self.restartDialog.setBin('gui-popup', -5)
def display_noteOnChange_dialog(self):
self.delete_dialogs()
self.noteOnChangeDialog = PDialog.PDialog(text=PLocalizer.GameOptionsNoteOnChange, style=OTPDialog.Acknowledge, giveMouse=False, command=self.default_dialog_command)
self.noteOnChangeDialog.setBin('gui-popup', -5)
def display_stereoOption_dialog(self):
self.delete_dialogs()
self.stereoOptionDialog = PDialog.PDialog(text=PLocalizer.GameOptionsStereoOption, style=OTPDialog.Acknowledge, giveMouse=False, command=self.default_dialog_command)
self.stereoOptionDialog.setBin('gui-popup', -5)
def save_button_function(self):
self.delete_dialogs()
if self.options.save(self.file_path, Options.NEW_STATE):
self.options.log('User Saved Options')
self.savedDialog = PDialog.PDialog(text=PLocalizer.GameOptionsSaved, style=OTPDialog.Acknowledge, giveMouse=False, command=self.default_dialog_command)
else:
self.savedDialog = PDialog.PDialog(text=PLocalizer.GameOptionsFailedToSaveOptions, style=OTPDialog.Acknowledge, giveMouse=False, command=self.default_dialog_command)
if hasattr(base, 'localAvatar'):
if base.localAvatar.isPopulated():
inv = base.localAvatar.getInventory()
if inv:
if self.tutPanelOptions[0] != inv.getStackQuantity(InventoryType.TutTypeBasic):
base.localAvatar.sendRequestChangeTutType(InventoryType.TutTypeBasic, self.tutPanelOptions[0])
if self.tutPanelOptions[1] != inv.getStackQuantity(InventoryType.TutTypeIntermediate):
base.localAvatar.sendRequestChangeTutType(InventoryType.TutTypeIntermediate, self.tutPanelOptions[1])
if self.tutPanelOptions[2] != inv.getStackQuantity(InventoryType.TutTypeAdvanced):
    base.localAvatar.sendRequestChangeTutType(InventoryType.TutTypeAdvanced, self.tutPanelOptions[2])
self.savedDialog.setBin('gui-popup', -5)
def default_dialog_command(self, value):
self.delete_dialogs()
def showUpsell(self):
if self.chooser:
self.chooser.popupFeatureBrowser(0, 0)
else:
base.localAvatar.guiMgr.showNonPayer(quest='Game_Options', focus=0)
self.hide()
def close_button_function(self):
self.hide()
def x_to_gui_coordinate(self, x):
return x * self.width
def y_to_gui_coordinate(self, y):
return self.height - y * self.height
def reflection_off_button_function(self):
self.options.reflection = 0
Water.all_reflections_off()
self.highlight_button(self.reflection_off_button)
self.fade_button(self.reflection_sky_button)
self.fade_button(self.reflection_default_button)
self.fade_button(self.reflection_all_button)
messenger.send('options_reflections_change', [0])
self.update()
def reflection_sky_button_function(self):
self.options.reflection = 1
Water.all_reflections_show_through_only()
self.fade_button(self.reflection_off_button)
self.highlight_button(self.reflection_sky_button)
self.fade_button(self.reflection_default_button)
self.fade_button(self.reflection_all_button)
messenger.send('options_reflections_change', [1])
self.update()
def reflection_default_button_function(self):
self.options.reflection = 2
Water.all_reflections_on()
self.fade_button(self.reflection_off_button)
self.fade_button(self.reflection_sky_button)
self.highlight_button(self.reflection_default_button)
self.fade_button(self.reflection_all_button)
messenger.send('options_reflections_change', [2])
self.update()
def reflection_all_button_function(self):
self.options.reflection = 3
Water.all_reflections_on()
self.fade_button(self.reflection_off_button)
self.fade_button(self.reflection_sky_button)
self.fade_button(self.reflection_default_button)
self.highlight_button(self.reflection_all_button)
self.update()
def shader_off_button_function(self):
if self.options.shader != 0:
self.display_restart_dialog()
self.options.shader = 0
self.fade_button(self.shader_on_button)
self.highlight_button(self.shader_off_button)
self.update()
def shader_on_button_function(self):
if self.options.shader != 1:
self.display_restart_dialog()
self.options.shader = 1
self.fade_button(self.shader_off_button)
self.highlight_button(self.shader_on_button)
self.update()
def simple_shadow_button_function(self):
try:
time_of_day_manager = base.cr.timeOfDayManager
except:
time_of_day_manager = None
else:
if time_of_day_manager:
time_of_day_manager.disableAvatarShadows()
self.options.shadow = 0
self.highlight_button(self.simple_shadow_button)
self.fade_button(self.shadow_button)
self.update()
return
def shadow_button_function(self):
try:
time_of_day_manager = base.cr.timeOfDayManager
except:
time_of_day_manager = None
else:
if time_of_day_manager:
time_of_day_manager.enableAvatarShadows()
self.options.shadow = 1
self.fade_button(self.simple_shadow_button)
self.highlight_button(self.shadow_button)
self.update()
return
def special_effects_low_button_function(self):
self.highlight_button(self.special_effects_low_button)
self.fade_button(self.special_effects_medium_button)
self.fade_button(self.special_effects_high_button)
self.options.special_effects = Options.SpecialEffectsLow
self.options.setRuntimeSpecialEffects()
def special_effects_medium_button_function(self):
self.fade_button(self.special_effects_low_button)
self.highlight_button(self.special_effects_medium_button)
self.fade_button(self.special_effects_high_button)
self.options.special_effects = Options.SpecialEffectsMedium
self.options.setRuntimeSpecialEffects()
def special_effects_high_button_function(self):
self.fade_button(self.special_effects_low_button)
self.fade_button(self.special_effects_medium_button)
self.highlight_button(self.special_effects_high_button)
self.options.special_effects = Options.SpecialEffectsHigh
self.options.setRuntimeSpecialEffects()
def texture_low_button_function(self):
self.highlight_button(self.texture_low_button)
self.fade_button(self.texture_medium_button)
self.fade_button(self.texture_high_button)
self.fade_button(self.texture_maximum_button)
if self.options.texture_scale_mode:
if self.options.texture_scale != Options.texture_scale_low:
self.display_restart_dialog()
self.options.texture_scale = Options.texture_scale_low
self.setTextureScale()
else:
self.options.texture = Options.texture_low
def texture_medium_button_function(self):
self.fade_button(self.texture_low_button)
self.highlight_button(self.texture_medium_button)
self.fade_button(self.texture_high_button)
self.fade_button(self.texture_maximum_button)
if self.options.texture_scale_mode:
if self.options.texture_scale != Options.texture_scale_medium:
self.display_restart_dialog()
self.options.texture_scale = Options.texture_scale_medium
self.setTextureScale()
else:
self.options.texture = Options.texture_medium
def texture_high_button_function(self):
self.fade_button(self.texture_low_button)
self.fade_button(self.texture_medium_button)
self.highlight_button(self.texture_high_button)
self.fade_button(self.texture_maximum_button)
if self.options.texture_scale_mode:
if self.options.texture_scale != Options.texture_scale_high:
self.display_restart_dialog()
self.options.texture_scale = Options.texture_scale_high
self.setTextureScale()
else:
self.options.texture = Options.texture_high
def texture_maximum_button_function(self):
self.fade_button(self.texture_low_button)
self.fade_button(self.texture_medium_button)
self.fade_button(self.texture_high_button)
self.highlight_button(self.texture_maximum_button)
if self.options.texture_scale_mode:
if self.options.texture_scale != Options.texture_scale_maximum:
self.display_restart_dialog()
self.options.texture_scale = Options.texture_scale_maximum
self.setTextureScale()
else:
self.options.texture = Options.texture_scale_maximum
def texture_compression_button_function(self):
self.display_restart_dialog()
if self.options.textureCompression:
self.options.textureCompression = 0
else:
self.options.textureCompression = 1
if self.options.textureCompression:
self.highlight_button(self.texture_compression_button)
else:
self.fade_button(self.texture_compression_button)
def texture_compression_button_display(self):
if self.options.textureCompression:
self.highlight_button(self.texture_compression_button)
else:
self.fade_button(self.texture_compression_button)
def character_low_button_function(self):
self.highlight_button(self.character_low_button)
self.fade_button(self.character_medium_button)
self.fade_button(self.character_high_button)
level = Options.option_low
self.options.setRuntimeAvatarDetailLevel(self.options.character_detail_level)
self.options.character_detail_level = level
self.options.setRuntimeAvatarDetailLevel(level)
def character_medium_button_function(self):
self.fade_button(self.character_low_button)
self.highlight_button(self.character_medium_button)
self.fade_button(self.character_high_button)
level = Options.option_medium
self.options.character_detail_level = level
self.options.setRuntimeAvatarDetailLevel(level)
def character_high_button_function(self):
self.fade_button(self.character_low_button)
self.fade_button(self.character_medium_button)
self.highlight_button(self.character_high_button)
level = Options.option_high
self.options.character_detail_level = level
self.options.setRuntimeAvatarDetailLevel(level)
def terrain_low_button_function(self):
self.highlight_button(self.terrain_low_button)
self.fade_button(self.terrain_medium_button)
self.fade_button(self.terrain_high_button)
level = Options.option_low
self.options.terrain_detail_level = level
self.options.setRuntimeGridDetailLevel(level)
def terrain_medium_button_function(self):
self.fade_button(self.terrain_low_button)
self.highlight_button(self.terrain_medium_button)
self.fade_button(self.terrain_high_button)
level = Options.option_medium
self.options.terrain_detail_level = level
self.options.setRuntimeGridDetailLevel(level)
def terrain_high_button_function(self):
self.fade_button(self.terrain_low_button)
self.fade_button(self.terrain_medium_button)
self.highlight_button(self.terrain_high_button)
level = Options.option_high
self.options.terrain_detail_level = level
self.options.setRuntimeGridDetailLevel(level)
def aggressive_memory_button_function(self):
self.highlight_button(self.aggressive_memory_button)
self.fade_button(self.default_memory_button)
self.options.memory = 1
self.setLowMemory()
def default_memory_button_function(self):
self.fade_button(self.aggressive_memory_button)
self.highlight_button(self.default_memory_button)
self.options.memory = 0
self.setLowMemory()
def setLowMemory(self):
if hasattr(base, 'setLowMemory'):
base.setLowMemory(self.options.memory)
def off_ship_vis_button_function(self):
self.highlight_button(self.off_ship_vis_button)
self.fade_button(self.low_ship_vis_button)
self.fade_button(self.high_ship_vis_button)
self.options.ocean_visibility = 0
if not base.overrideShipVisibility:
base.shipsVisibleFromIsland = 0
messenger.send('ship_vis_change', [0])
def low_ship_vis_button_function(self):
self.fade_button(self.off_ship_vis_button)
self.highlight_button(self.low_ship_vis_button)
self.fade_button(self.high_ship_vis_button)
self.options.ocean_visibility = 1
if not base.overrideShipVisibility:
base.shipsVisibleFromIsland = 1
messenger.send('ship_vis_change', [1])
def high_ship_vis_button_function(self):
self.fade_button(self.off_ship_vis_button)
self.fade_button(self.low_ship_vis_button)
self.highlight_button(self.high_ship_vis_button)
self.options.ocean_visibility = 2
if not base.overrideShipVisibility:
base.shipsVisibleFromIsland = 2
messenger.send('ship_vis_change', [2])
def sound_off_button_function(self):
self.options.sound = 0
self.fade_button(self.sound_on_button)
self.highlight_button(self.sound_off_button)
base.enableSoundEffects(False)
self.update()
def sound_on_button_function(self):
self.options.sound = 1
self.highlight_button(self.sound_on_button)
self.fade_button(self.sound_off_button)
base.enableSoundEffects(True)
self.update()
def music_off_button_function(self):
self.options.music = 0
self.fade_button(self.music_on_button)
self.highlight_button(self.music_off_button)
base.enableMusic(False)
self.update()
def music_on_button_function(self):
self.options.music = 1
self.highlight_button(self.music_on_button)
self.fade_button(self.music_off_button)
base.enableMusic(True)
self.update()
def mouse_look_off_button_function(self):
self.options.mouse_look = 0
self.fade_button(self.mouse_look_on_button)
self.highlight_button(self.mouse_look_off_button)
self.update()
def mouse_look_on_button_function(self):
self.options.mouse_look = 1
self.highlight_button(self.mouse_look_on_button)
self.fade_button(self.mouse_look_off_button)
self.update()
def ship_look_off_button_function(self):
self.options.ship_look = 0
self.fade_button(self.ship_look_on_button)
self.highlight_button(self.ship_look_off_button)
self.update()
def ship_look_on_button_function(self):
self.options.ship_look = 1
self.highlight_button(self.ship_look_on_button)
self.fade_button(self.ship_look_off_button)
self.update()
def cpu_frequency_warning_off_button_function(self):
self.options.cpu_frequency_warning = 0
self.fade_button(self.cpu_frequency_warning_on_button)
self.highlight_button(self.cpu_frequency_warning_off_button)
self.update()
def cpu_frequency_warning_on_button_function(self):
self.options.cpu_frequency_warning = 1
self.highlight_button(self.cpu_frequency_warning_on_button)
self.fade_button(self.cpu_frequency_warning_off_button)
self.update()
def open_key_mappings_page(self):
self.controlsFrame = BorderFrame(parent=self, relief=None, frameSize=(0, self.width - 0.15, 0, PiratesGuiGlobals.TextScaleLarge * 2.5), pos=(0.08, 0, self.height - 0.15 - PiratesGuiGlobals.TextScaleLarge * 2.5))
return
def gamma_off_button_function(self):
self.options.gamma_enable = 0
self.fade_button(self.gamma_on_button)
self.highlight_button(self.gamma_off_button)
if base.win and base.win.getGsg():
base.win.getGsg().restoreGamma()
self.update()
def gamma_on_button_function(self):
self.options.gamma_enable = 1
self.highlight_button(self.gamma_on_button)
self.fade_button(self.gamma_off_button)
if base.win and base.win.getGsg():
if self.options.gamma_enable:
base.win.getGsg().setGamma(self.options.optionsGammaToGamma(self.options.gamma))
self.update()
def hdr_off_button_function(self):
if self.options.hdr != 0:
self.display_restart_dialog()
self.options.hdr = 0
self.fade_button(self.hdr_on_button)
self.highlight_button(self.hdr_off_button)
self.update()
def hdr_on_button_function(self):
if self.options.hdr != 1:
self.display_restart_dialog()
self.options.hdr = 1
self.highlight_button(self.hdr_on_button)
self.fade_button(self.hdr_off_button)
self.update()
def delete(self):
pass
def set_options(self, change_display, restore=False):
if self.gui:
self.gui.set_options(change_display)
return
def update(self):
self.options.options_to_config()
def isHidden(self):
if self.gui:
return self.gui.isHidden()
def show(self):
self.restore_options = copy.copy(self.options)
if self.gui:
self.gui.show()
if hasattr(base, 'localAvatar') and base.localAvatar.isPopulated():
self.setTutPanelOptions()
self.gui.tutorialButton.show()
self.gui.tutorialButtonFrame.show()
else:
self.gui.tutorialButton.hide()
self.gui.tutorialButtonFrame.hide()
def hide(self, log=True):
self.delete_dialogs()
if self.gui:
self.gui.hide()
if log:
self.options.log('User Closed Options')
@classmethod
def width_to_resolution_id(self, width):
id = 1
index = 0
total_resolutions = len(base.resolution_table)
while index < total_resolutions:
if width == base.resolution_table[index][0]:
id = index
break
index += 1
return id
def setNonPaid(self):
for button in self.windowed_resolutions_button_array[2:]:
button.hide()
for button in self.fullscreen_resolutions_button_array:
button.hide()
def setPaid(self):
for button in self.windowed_resolutions_button_array[2:]:
button.show()
for button in self.fullscreen_resolutions_button_array:
button.show()
def initDisplayButtons(self):
self.inAdFrame = base.inAdFrame
if self.inAdFrame:
self.setNonPaid()
windowed_index = self.options.resolution
else:
windowed_index = self.resolutionToIndex(self.options.window_width, self.options.window_height, False)
self.setPaid()
fullscreen_index = self.resolutionToIndex(self.options.fullscreen_width, self.options.fullscreen_height, False)
for i in xrange(len(self.windowed_resolutions_button_array)):
if i == windowed_index:
self.highlight_button(self.windowed_resolutions_button_array[i])
else:
self.fade_button(self.windowed_resolutions_button_array[i])
for i in xrange(len(self.fullscreen_resolutions_button_array)):
if base.inAdFrame:
self.inactive_button(self.fullscreen_resolutions_button_array[i])
elif i == fullscreen_index:
self.highlight_button(self.fullscreen_resolutions_button_array[i])
else:
self.fade_button(self.fullscreen_resolutions_button_array[i])
def resolutionToIndex(self, width, height, fullscreen):
resolution_index = -1
if fullscreen:
resolution_table = base.fullscreen_resolution_table
else:
resolution_table = base.windowed_resolution_table
if resolution_table:
index = 0
total_resolutions = len(resolution_table)
while index < total_resolutions:
if width == resolution_table[index][0] and height == resolution_table[index][1]:
resolution_index = index
break
index += 1
return resolution_index
def setTextureScale(self):
self.options.setTextureScale()
def updateShipVisibility(self):
if self.enable_ship_visibility:
if self.options.ocean_visibility == 0:
self.off_ship_vis_button_function()
elif self.options.ocean_visibility == 1:
self.low_ship_vis_button_function()
elif self.options.ocean_visibility == 2:
self.high_ship_vis_button_function()
def setTutPanelOptions(self):
inv = base.localAvatar.getInventory()
if inv:
self.tutPanelOptions = [
inv.getStackQuantity(InventoryType.TutTypeBasic), inv.getStackQuantity(InventoryType.TutTypeIntermediate), inv.getStackQuantity(InventoryType.TutTypeAdvanced)]
if self.gui:
self.gui.setTutPanelOptions() | [
"[email protected]"
] | |
c142dee0cde1b43a81c0d5697afeee12e008eb37 | 243d0543f8d38f91954616c014456122292a1a3c | /CS1/0320_herbivwar/draft04/critterAI1.py | 90f7ec363bc8911db4de6b7fb77c35166c061e2a | [
"MIT"
] | permissive | roni-kemp/python_programming_curricula | 758be921953d82d97c816d4768fbcf400649e969 | eda4432dab97178b4a5712b160f5b1da74c068cb | refs/heads/master | 2023-03-23T13:46:42.186939 | 2020-07-15T17:03:34 | 2020-07-15T17:03:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | import random, critter
class CritterAI1(critter.Critter):
def __init__(self, screen, row, col, image, team, name):
super().__init__(screen, row, col, image, team, name)
def reproduce(self, critter_cells):
'''You have to have this in each critter child
class so that CritterAIs can reproduce with the
same AI and not with a generic critter that
happens to share the same image!'''
new_child = CritterAI1(self.screen, self.row, self.col, self.original_image, self.team, self.name)
self.customReproduce(critter_cells, new_child)
def takeAction(self, board, critter_cells):
'''TODO: Students write code here.
Each creature only takes one action per turn.
The action is the value returned, one of:
'reproduce', 'eat', 'attack', 'left',
'right', 'move', 'rest'
Fatigue is the amount of energy consumed by
each action.
#how much fatigue do I have?
print(self.fatigue)
print("losing "+str(self.fatigue)+" energy this turn")
#If fatigue is too high, reset fatigue to zero
if self.fatigue > 4:
return 'rest'
The eat action consumes all the energy at the
current location.
#If there is lots of energy here, then eat.
if self.energyHere(board) > 10:
return 'eat'
#Eat if I am low on energy
if self.energy < 3:
return 'eat'
#Don't walk out of bounds
if self.aheadInBounds(critter_cells):
return 'move'
else:
return 'right'
#Check to make sure ahead is inbounds and
#not blocked by another creature before
#reproducing.
if self.aheadInBounds(critter_cells) and self.getCritterAhead(critter_cells)==None:
return 'reproduce'
#If there is an enemy ahead, attack them.
other_critter = self.getCritterAhead(critter_cells)
if other_critter != None and other_critter.team != self.team:
return 'attack'
#Choose one of three random actions
r = random.randint(0,2)
if r==0:
return 'left'
elif r==1:
return 'right'
else:
return 'rest'
'''
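# Strategy: reproduce when energy is high and the square ahead is free, rest off fatigue,
# eat energy-rich squares, attack enemies directly ahead, otherwise turn or rest at random.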
#reproduce
if self.energy > 4:
if self.aheadInBounds(critter_cells) and self.getCritterAhead(critter_cells)==None:
return 'reproduce'
#rest
if self.fatigue > 1:
return 'rest'
#eat
if self.energyHere(board) > 4:
return 'eat'
#fight
other = self.getCritterAhead(critter_cells)
if other!=None and other.team!=self.team:
return 'attack'
#Otherwise random
r = random.randint(0,2)
if r==0:
return 'left'
elif r==1:
return 'right'
else:
return 'rest'
| [
"[email protected]"
] | |
4685ca9231aef2f67fedb4b4c61bf9b489279acf | 9cd27764abf5412dffd351a0a4956075191dd67e | /day 2/rename_file.py | 932ad167449c991f82661fd48217b3e1ca8221aa | [] | no_license | EHwooKim/Start_camp | 1cebb6775f68af1ca6ecaf37bc8ad4d0864e77de | d582e20ce1a3ccb9cd31422b3d5be3b64c2627d5 | refs/heads/master | 2020-06-17T15:26:14.898280 | 2019-08-05T00:12:59 | 2019-08-05T00:12:59 | 195,963,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import os
# 1. Change into the dummy folder.
os.chdir('./dummy')
print(os.getcwd())
# 2. Rename the files one by one. => loop
files = os.listdir('.')
print(type(files))
#for file in files:
# os.rename(file, f'SAMSUNG_{file}')
# 3. It should have been prefixed with SSAFY, not SAMSUNG!
for file in files:
os.rename(file, file.replace('SAMSUNG_SAMSUNG','SSAFY'))
| [
"[email protected]"
] | |
057ae25723d470ecd9c4657c06e722e7cb93acd7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/50/usersdata/111/17678/submittedfiles/contido.py | 67535e3f4f65734c36914824446b50ff8537f736 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
from __future__ import division
def incluso(a,b):
cont=0
for i in range(0,len(a),1):
if a[i] in b:
cont = cont +1
if cont!=0:
return True
else:
return False
n=input('Tamanho da lista : ')
a=[]
for i in range(0,n,1):
a.append(input('Elementos da lista1: '))
n2=input('Tamanho da lista : ')
b=[]
for i in range(0,n2,1):
b.append(input('Elementos da lista2: '))
print(incluso(a,b))
| [
"[email protected]"
] | |
4c371b4f7afd64a5b4c2bd58df54c3b208d71660 | 55a4cd36bbea38fda0792020225d849dbc23f4a8 | /initUserState.py | 866f870de02309f8d88e1dc455f61454eb88a387 | [] | no_license | liyonghelpme/findAMatch | 4e4e4960715292e14aa59bbec5407d38562fb4c5 | 3e8d96106247b72c4b89412ab290d4f6462bdcea | refs/heads/master | 2021-01-19T08:10:31.909865 | 2013-05-04T11:06:16 | 2013-05-04T11:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #coding:utf8
import MySQLdb
import random
myCon = MySQLdb.connect(host='localhost', passwd='badperson3', db='UserMatch', user='root', charset='utf8')
for i in xrange(0, 100):
sql = 'insert into UserState (uid, shieldTime, attackTime, onlineTime, score) values(%d, %d, %d, %d, %d)' % (i, 0, 0, 0, random.randint(0, 1000))
myCon.query(sql)
# Test data for users in the shielded (protected) state
# User data for the attacking state
# User data for the online state
myCon.commit()
myCon.close()
| [
"[email protected]"
] | |
bd679510fd69971fb1346300fec7f9ac0725b03b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/453/usersdata/281/109875/submittedfiles/programa.py | 22338be239a906640e2e45b03fa532fc2036ad96 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
n=int(input('Digite a dimensão do tabuleiro: '))
m=[]
for i in range (0,n,1):
m_linha=[]
for j in range(0,n,1):
m_linha.append(int(input('Digite o números (%d,%d) de entrada: '% (i+1,j+1))))
m.append(m_linha)
soma_linha=[]
for i in range(0,n,1):
c=0
for j in range(0,n,1):
c=c+m[i][j]
soma_linha.append(c)
soma_coluna=[]
for j in range(0,n,1):
c2=0
for i in range(0,n,1):
c2=c2+m[i][j]
soma_coluna.append(c2)
peça=0
for i in range(0,n,1):
for j in range(0,n,1):
if soma_linha[i]+soma_coluna[j]-2*m[i][j]>peça:
peça=soma_linha[i]+soma_coluna[j]-2*m[i][j]
print(peça) | [
"[email protected]"
] | |
5acb43c07eab61bc8a8fd5259359e49af52e3a64 | bbb21bb79c8c3efbad3dd34ac53fbd6f4590e697 | /restAPI/manage.py | 6f0f73c179c0948e0594011d582e9bccf06ad320 | [] | no_license | Nusmailov/BFDjango | b14c70c42da9cfcb68eec6930519da1d0b1f53b6 | cab7f0da9b03e9094c21efffc7ab07e99e629b61 | refs/heads/master | 2020-03-28T21:11:50.706778 | 2019-01-21T07:19:19 | 2019-01-21T07:19:19 | 149,136,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restAPI.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
8871075a56cf9bb00ab3a122de9f2f785bb77da6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_088/ch70_2020_09_10_19_51_05_981407.py | 124c962afd71fd9efb28559670d236a70a72b1d4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | def esconde_senha(n):
senha='*'*n
return senha | [
"[email protected]"
] | |
050629c4fcfc80b4ca7aa69dbbfa34c66605e8f1 | 76050b0002dac757866a9fb95dc199918da665bb | /examples/multiagent/multigrid/run_multigrid.py | 0b5ef0a06d99e8fcd41152dacded9ea9f749c7e6 | [
"Apache-2.0"
] | permissive | RaoulDrake/acme | 2829f41688db68d694da2461d301fd6f9f27edff | 97c50eaa62c039d8f4b9efa3e80c4d80e6f40c4c | refs/heads/master | 2022-12-29T01:16:44.806891 | 2022-12-21T14:09:38 | 2022-12-21T14:10:06 | 300,250,466 | 0 | 0 | Apache-2.0 | 2020-10-01T11:13:03 | 2020-10-01T11:13:02 | null | UTF-8 | Python | false | false | 4,123 | py | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent multigrid training run example."""
from typing import Callable, Dict
from absl import flags
from acme import specs
from acme.agents.jax.multiagent import decentralized
from absl import app
import helpers
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.multiagent import types as ma_types
from acme.utils import lp_utils
from acme.wrappers import multigrid_wrapper
import dm_env
import launchpad as lp
FLAGS = flags.FLAGS
_RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
_NUM_STEPS = flags.DEFINE_integer('num_steps', 10000,
'Number of env steps to run training for.')
_EVAL_EVERY = flags.DEFINE_integer('eval_every', 1000,
'How often to run evaluation.')
_ENV_NAME = flags.DEFINE_string('env_name', 'MultiGrid-Empty-5x5-v0',
'What environment to run.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 64, 'Batch size.')
_SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
def _make_environment_factory(env_name: str) -> jax_types.EnvironmentFactory:
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return multigrid_wrapper.make_multigrid_environment(env_name)
return environment_factory
def _make_network_factory(
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent]
) -> Callable[[specs.EnvironmentSpec], ma_types.MultiAgentNetworks]:
def environment_factory(
environment_spec: specs.EnvironmentSpec) -> ma_types.MultiAgentNetworks:
return decentralized.network_factory(environment_spec, agent_types,
helpers.init_default_multigrid_network)
return environment_factory
def build_experiment_config() -> experiments.ExperimentConfig[
ma_types.MultiAgentNetworks, ma_types.MultiAgentPolicyNetworks,
ma_types.MultiAgentSample]:
"""Returns a config for multigrid experiments."""
environment_factory = _make_environment_factory(_ENV_NAME.value)
environment = environment_factory(_SEED.value)
agent_types = {
str(i): decentralized.DefaultSupportedAgent.PPO
for i in range(environment.num_agents) # pytype: disable=attribute-error
}
# Example of how to set custom sub-agent configurations.
ppo_configs = {'unroll_length': 16, 'num_minibatches': 32, 'num_epochs': 10}
config_overrides = {
k: ppo_configs for k, v in agent_types.items() if v == 'ppo'
}
configs = decentralized.default_config_factory(agent_types, _BATCH_SIZE.value,
config_overrides)
builder = decentralized.DecentralizedMultiAgentBuilder(
agent_types=agent_types, agent_configs=configs)
return experiments.ExperimentConfig(
builder=builder,
environment_factory=environment_factory,
network_factory=_make_network_factory(agent_types=agent_types),
seed=_SEED.value,
max_num_actor_steps=_NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if _RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config, eval_every=_EVAL_EVERY.value, num_eval_episodes=5)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
e7cb34776c3c785a60bea9c5a22fb9ffeb4f16e9 | 6220d04a60dae0e44ba2232cba6c79114bf2fd3f | /test_remote_project/test_remote_project/urls.py | 565409fca290c5720a80e2fa65d99ec0a96bd81b | [
"MIT"
] | permissive | ollytheninja/django-autocomplete-light | bc65e92ffddbe1332a59bf4d6248976e8c0542b4 | 6e4723c4a75870e7421cb5656248ef2b61eeeca6 | refs/heads/master | 2021-01-21T03:50:46.975650 | 2015-10-07T23:20:47 | 2015-10-07T23:20:47 | 44,034,996 | 0 | 1 | MIT | 2020-10-01T19:22:48 | 2015-10-11T01:36:02 | Python | UTF-8 | Python | false | false | 761 | py | from django.conf.urls import patterns, include, url
import autocomplete_light
autocomplete_light.autodiscover()
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'test_project.views.home', name='home'),
# url(r'^test_project/', include('test_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'^navigation/', include('navigation_autocomplete.urls')),
)
| [
"[email protected]"
] | |
cccc7cd7cbbfc831f29622384dbebe04357cdfdf | d5682d2ef13ad63c68d59d3d0706853a88035ff1 | /week3/snmp_app.py | b8ca881ab0a218a4afdf708477df3f8fd7f7a8a6 | [
"Apache-2.0"
] | permissive | mikealford/ktbyers_automation | 66467f5352a3fbb111fc18f9c90b83cf97a75e79 | d8b30e7ddbe27b4bc62b74bfc051b6d1c099f7f9 | refs/heads/master | 2020-04-17T19:37:42.365653 | 2019-02-19T01:16:41 | 2019-02-19T01:16:41 | 166,872,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import snmp_helper
IP = '192.168.122.172'
COMMUNITY_STRING = 'galileo1'
SNMP_PORT = 161
pynet_rtr1 = (IP, COMMUNITY_STRING, SNMP_PORT)
snmp_data = snmp_helper.snmp_get_oid(pynet_rtr1, oid='1.3.6.1.2.1.1.5.0')
output = snmp_helper.snmp_extract(snmp_data)
print(output)
| [
"[email protected]"
] | |
db966fb201c69d1a48e53305e1b2f6e3e7545f15 | d8cf5130deaf3bfba0e5b9d326012fbcddd18fb2 | /embed/models/position_embedding_heads/avg_embedding_head.py | 7600e1b37195e715a9b593977585a70e4af69f1a | [] | no_license | xmyqsh/embed | 0ed297e636fb9d9db20dd908f92ef6212b9c3549 | 46d07ddf6c5908bdfff86ca3203c325ad34423dc | refs/heads/master | 2023-06-11T08:25:37.733238 | 2021-06-30T10:41:24 | 2021-06-30T10:41:24 | 379,320,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,392 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from embed.cv.cnn import ConvModule, bias_init_with_prob, normal_init
from embed.cv.runner import auto_fp16
from embed.core.utils import multi_apply
from embed.models import POSITION_EMBEDDING_HEADS
from .base_position_embedding_head import BasePositionEmbeddingHead
import numpy as np
@POSITION_EMBEDDING_HEADS.register_module()
class AvgEmbeddingHead(BasePositionEmbeddingHead):
r"""Average Embedding Head.
Args:
in_channels (int):
num_classes (int):
scale_ranges (List[Tuple]):
thres (float):
"""
def __init__(self,
ignore_val=255,
*args,
**kwargs):
super(AvgEmbeddingHead, self).__init__(*args, **kwargs)
self.ignore_val = ignore_val
# TODO(ljm) add this param into a proper place
self.scale_factor = 1. / 4
def forward_train(self, x, pred_weights, gt_semantic_seg):
pred_regions = self(x)
gt_scoremap, gt_sem_label, gt_sem_mask, gt_sem_class, num_sts = \
self.get_targets(pred_regions, gt_semantic_seg)
loss_pos_st = self.loss(pred_regions, gt_scoremap)
gt_guided_idx_feat_sts = self.get_gt_guided_positions(pred_weights,
gt_sem_mask, num_sts)
return dict(loss_pos_st=loss_pos_st), \
dict(gt_guided_idx_feat_sts=gt_guided_idx_feat_sts,
gt_sem_label=gt_sem_label,
gt_sem_class=gt_sem_class,
num_sts=num_sts)
def get_targets(self, pred_regions, gt_semantic_seg):
gt_semantic_seg[gt_semantic_seg == self.ignore_val] = self.num_classes
gt_semantic_seg = F.one_hot(gt_semantic_seg.squeeze(1).long(),
num_classes=self.num_classes + 1)[..., :-1]
gt_semantic_seg = gt_semantic_seg.permute(0, 3, 1, 2).float().contiguous()
'''
gt_semantic_seg = F.interpolate(gt_semantic_seg, scale_factor=self.scale_factor,
mode='bilinear',
align_corners=False).clamp(max=1.0)
'''
return multi_apply(self.get_target_single_level, pred_regions,
gt_semantic_seg=gt_semantic_seg)
def get_target_single_level(self, pred_region, gt_semantic_seg):
gt_scoremap = F.interpolate(gt_semantic_seg, size=pred_region.shape[-2:],
mode='bilinear',
align_corners=False).clamp(max=1.0)
gt_scoremap[gt_scoremap < 0.5] = 0.0
gt_assign_mask = gt_scoremap.reshape(*gt_scoremap.shape[:-2], -1).sum(dim=-1) > 0
gt_sem_label, gt_sem_mask, gt_sem_class, num_sts = \
multi_apply(self.get_target_single_image, gt_semantic_seg, gt_scoremap, gt_assign_mask)
return gt_scoremap, gt_sem_label, gt_sem_mask, gt_sem_class, num_sts
def get_target_single_image(self, gt_semantic_seg, gt_scoremap, gt_assign_mask):
gt_sem_class = torch.nonzero(gt_assign_mask, as_tuple=False).squeeze(-1)
num_sem = gt_assign_mask.sum().item()
gt_sem_label = gt_semantic_seg[gt_assign_mask]
gt_sem_mask = gt_scoremap[gt_assign_mask].bool().float()
return gt_sem_label, gt_sem_mask, gt_sem_class, num_sem
def loss(self, pred_regions, gt_scoremap):
return list(map(self.loss_single_level, pred_regions, gt_scoremap))
def loss_single_level(self, pred_region, gt_scoremap):
b, c = pred_region.shape[:2]
loss_pos = self.loss_pos(pred_region, gt_scoremap, reduction_override='none')
loss_pos = loss_pos.reshape(b, c, -1).mean(dim=-1)
loss_pos = loss_pos.sum() / b
return loss_pos
def get_gt_guided_positions(self, pred_weights, gt_sem_mask, num_sts):
return list(map(self.get_gt_guided_position_single_level, pred_weights,
gt_sem_mask,
num_sts))
def get_gt_guided_position_single_level(self, pred_weight, gt_sem_mask, num_sts):
idx_feat_sts = list(map(lambda a, b: a.unsqueeze(0) * b.unsqueeze(1),
pred_weight, gt_sem_mask))
idx_feat_st = torch.cat(idx_feat_sts, dim=0)
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1).squeeze(-1).squeeze(-1)
return torch.split(idx_feat_st, num_sts, dim=0)
def get_positions(self, pred_regions, pred_weights):
return multi_apply(self.get_position_single_level, pred_regions, pred_weights)
def get_position_single_level(self, pred_region, pred_weight):
pred_region = pred_region.sigmoid()
pred_cate = pred_region.argmax(dim=1)
pred_st_mask = F.one_hot(pred_cate, num_classes=self.num_classes)
pred_st_mask = pred_st_mask.permute(0, 3, 1, 2).contiguous()
score_st = (pred_region * pred_st_mask).reshape(*pred_region.shape[:2], -1)
idx_feat_sts, class_sts, score_sts, num_sts = \
multi_apply(self.get_position_single_image, pred_cate,
pred_st_mask,
score_st,
pred_weight)
idx_feat_st = torch.cat(idx_feat_sts, dim=0)
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1).squeeze(-1).squeeze(-1)
idx_feat_sts = torch.split(idx_feat_st, num_sts, dim=0)
return idx_feat_sts, class_sts, score_sts, num_sts
def get_position_single_image(self, pred_cate, pred_st_mask, score_st, pred_weight):
class_st, num_class_st = torch.unique(pred_cate, return_counts=True)
score_st = (score_st[class_st].sum(dim=-1) / num_class_st)
pred_st_mask = pred_st_mask[class_st]
keep = score_st > self.thres
class_st, score_st, pred_st_mask = class_st[keep], score_st[keep], pred_st_mask[keep]
num_st = keep.sum()
idx_feat_st = pred_st_mask.unsqueeze(1) * pred_weight.unsqueeze(0)
return idx_feat_st, class_st, score_st, num_st
| [
"[email protected]"
] | |
c607e65351a974ba2577b564935d2ab18480067d | d039f2a5bbf15cb8bfb99e3b67354a23f2065745 | /sisco1/app1/migrations/0001_initial.py | 5d6ceeea2fd02745addadf912f0c2a3ff4d28b0e | [] | no_license | prasadnaidu1/django | 22ad934eaee1079fbcd0a988245bdc18789a48f1 | b64a341d7c8916dd8000d85b738241c0b369b229 | refs/heads/master | 2020-04-12T19:21:42.565686 | 2019-04-18T16:35:16 | 2019-04-18T16:35:16 | 162,707,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 2.1.1 on 2018-10-18 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='friends',
fields=[
('entry', models.IntegerField(default=10, primary_key=True, serialize=False)),
('date', models.DateField()),
('amount', models.DecimalField(decimal_places=2, max_digits=10)),
('members', models.CharField(max_length=50)),
],
),
]
| [
"[email protected]"
] | |
87c583b7c77c5623c85ce162a31ae8d56854f57f | d5ed141e513dcb6fc8ab851835ec9a4630e3651b | /anaconda/anaconda/lib/python2.7/site-packages/anaconda_navigator/widgets/dialogs/tests/test_update_dialog.py | 133fa37a1c4ac99aae7e8bf6af80e4f6edc4f01d | [
"Python-2.0"
] | permissive | starrysky1211/starrysky | 713998b366449a5ae4371e38723c56ea40532593 | abb642548fb9b431551133657f1a67858041a7e6 | refs/heads/master | 2022-11-09T21:51:22.558151 | 2017-02-25T14:42:37 | 2017-02-25T14:42:37 | 67,608,074 | 0 | 1 | null | 2022-10-16T05:17:25 | 2016-09-07T13:16:45 | Python | UTF-8 | Python | false | false | 1,165 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016 Continuum Analytics, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""
Tests for update dialog.
"""
# Third party imports
from qtpy.QtCore import Qt # analysis:ignore
import pytest
import pytestqt.qtbot as qtbot # analysis:ignore
# Local imports
from anaconda_navigator.widgets.dialogs.update import DialogUpdateApplication
@pytest.fixture
def updatedialog(qtbot):
widget = DialogUpdateApplication("1.0")
widget.show()
qtbot.addWidget(widget)
return qtbot, widget
class TestUpdateDialog:
def test_yes(self, updatedialog):
qtbot, widget = updatedialog
with qtbot.waitSignal(widget.accepted, 1000, True):
qtbot.mouseClick(widget.button_yes, Qt.LeftButton)
def test_no(self, updatedialog):
qtbot, widget = updatedialog
with qtbot.waitSignal(widget.rejected, 1000, True):
qtbot.mouseClick(widget.button_no, Qt.LeftButton)
| [
"[email protected]"
] | |
013dd0590c4d0f506c064b1f272085f45e765844 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2541/60698/283571.py | dff37af14ff1eafccf659e6c4b6fba4047e355ac | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | def test():
n = int(input())
matches = list(eval(input()))
segList = []
for match in matches:
if not getSegList(segList, match):
print('[]')
return
res = []
for seg in segList:
topology(res, seg)
for i in range(0, n):
if i not in res:
res.append(i)
print(res)
def getSegList(segList, match) -> bool:
thisCourse = match[0]
beforeCourse = match[1]
if segList == []:
segList.append([beforeCourse, thisCourse])
return True
else:
for i in range(0, len(segList)):
seg = list(segList[i])
for j in range(0, len(seg)):
for k in range(0,j):
if seg[k]==thisCourse and seg[j]==beforeCourse:
return False
if seg[-1] == beforeCourse:
seg.append(thisCourse)
segList.pop(i)
segList.insert(i,seg)
return True
segList.append([beforeCourse, thisCourse])
return True
def topology(res, seg):
ind = -1
for i in range(0, len(seg)):
if seg[i] in res:
ind = res.index(seg[i]) + 1
continue
else:
if ind == -1 or ind >= len(res):
res.append(seg[i])
else:
res.insert(ind, seg[i])
ind = ind + 1
test()
| [
"[email protected]"
] | |
de3583f4711cd44ff8a4fe3228a6ec6a3a7093e0 | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /samples/cli/accelbyte_py_sdk_cli/achievement/_admin_create_new_achievement.py | a6fedd2b1b5536aa5a2b987bb622cd5b4b5b5def | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 2,699 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Achievement Service (2.21.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.achievement import (
admin_create_new_achievement as admin_create_new_achievement_internal,
)
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementRequest
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementResponse
from accelbyte_py_sdk.api.achievement.models import ResponseError
@click.command()
@click.argument("body", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_create_new_achievement(
body: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_create_new_achievement_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
if body is not None:
try:
body_json = json.loads(body)
body = ModelsAchievementRequest.create_from_dict(body_json)
except ValueError as e:
raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
result, error = admin_create_new_achievement_internal(
body=body,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminCreateNewAchievement failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_create_new_achievement.operation_id = "AdminCreateNewAchievement"
admin_create_new_achievement.is_deprecated = False
| [
"[email protected]"
] | |
050ffca21e8e249c55e742ca4256ecf84715d92b | 52a32a93942b7923b7c0c6ca5a4d5930bbba384b | /unittests/test_adminsite.py | bc48a7ea4540ec9daadd22614619ced502491a61 | [
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] | permissive | DefectDojo/django-DefectDojo | 43bfb1c728451335661dadc741be732a50cd2a12 | b98093dcb966ffe972f8719337de2209bf3989ec | refs/heads/master | 2023-08-21T13:42:07.238370 | 2023-08-14T18:00:34 | 2023-08-14T18:00:34 | 31,028,375 | 2,719 | 1,666 | BSD-3-Clause | 2023-09-14T19:46:49 | 2015-02-19T17:53:47 | HTML | UTF-8 | Python | false | false | 938 | py | from .dojo_test_case import DojoTestCase
from django.contrib import admin
import django.apps
class AdminSite(DojoTestCase):
fixtures = ['dojo_testdata.json']
def test_is_model_defined(self):
for subclass in django.apps.apps.get_models():
if subclass._meta.proxy:
continue
if subclass.__module__ == 'dojo.models':
if not ((subclass.__name__[:9] == "Tagulous_") and (subclass.__name__[-5:] == "_tags")):
with self.subTest(type="base", subclass=subclass):
self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'admin.site' in models.py".format(subclass))
else:
with self.subTest(type="tag", subclass=subclass):
self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'tagulous.admin' in models.py".format(subclass))
| [
"[email protected]"
] | |
2dd33060faa5ea97aa63c818cbb22f222dffe9f7 | 85e3baf2668db0592df3d9f9aa447b9f20ef25d6 | /ImageManipulations/BitwiseOperations.py | 72025d6bb36fc3b91511cc5460f59a6ff32501ee | [] | no_license | pekkipo/Computer_Vision | cd33a93f5706cd933761d02735f943354c6468fc | 55aaeae5b955540722270aab07295e85cfa26a95 | refs/heads/master | 2021-01-20T06:54:45.912254 | 2017-05-22T12:59:28 | 2017-05-22T12:59:28 | 89,944,925 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | # Very useful for masking the images
import cv2
import numpy as np
# CREATE HALF AN ELLIPSE AND A RECTANGLE
# If you're wondering why only two dimensions, well this is a grayscale image,
# if we doing a colored image, we'd use
# rectangle = np.zeros((300, 300, 3),np.uint8)
# Making a square
square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -2)
cv2.imshow("Square", square)
cv2.waitKey(0)
# Making a ellipse
ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow("Ellipse", ellipse)
cv2.waitKey(0)
cv2.destroyAllWindows()
# BITWISE OPERATIONS
# square and ellipse have to be of same dimensions
# Shows only where they intersect
And = cv2.bitwise_and(square, ellipse)
cv2.imshow("AND", And)
cv2.waitKey(0)
# Shows where either square or ellipse is
bitwiseOr = cv2.bitwise_or(square, ellipse)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)
# Shows where either exist by itself. Everything that both ellipse and rectangle will be black
bitwiseXor = cv2.bitwise_xor(square, ellipse)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)
# Shows everything that isn't part of the square
bitwiseNot_sq = cv2.bitwise_not(square) # takes one figure. Doing inverse
cv2.imshow("NOT - square", bitwiseNot_sq)
cv2.waitKey(0)
### Notice the last operation inverts the image totally
cv2.destroyAllWindows() | [
"[email protected]"
] | |
9605f4f4ccc4457804b9e7322ac882c255c923d5 | 691f49708fa5121e261650f01f2e9b93e9bdd26f | /skills.py | 01571c068d6ecf27898b218f52ef8c80bffac8a8 | [] | no_license | michelelee/skills-dictionaries- | 29a14980308ea1cc3b76bcc1f1c72a0236fd91e8 | 9c029792ab238f463b3642815b5dd6316299a6b3 | refs/heads/master | 2021-01-10T19:43:29.314898 | 2015-04-21T15:59:29 | 2015-04-21T15:59:29 | 34,235,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,307 | py | # To work on the advanced problems, set to True
ADVANCED = False
def count_unique(string1):
"""Count unique words in a string.
This function should take a single string and return a dictionary
that has all of the distinct words as keys, and the number of times
that word appears in the string.
For example:
>>> print_dict(count_unique("each word appears once"))
{'appears': 1, 'each': 1, 'once': 1, 'word': 1}
Words that appear more than once should be counted each time:
>>> print_dict(count_unique("rose is a rose is a rose"))
{'a': 2, 'is': 2, 'rose': 3}
It's fine to consider punctuation part of a word (e.g., a comma
at the end of a word can be counted as part of that word) and
to consider differently-capitalized words as different:
>>> print_dict(count_unique("Porcupine see, porcupine do."))
{'Porcupine': 1, 'do.': 1, 'porcupine': 1, 'see,': 1}
"""
uniquedict = {}
string1 = string1.split()
for word in string1:
uniquedict[word] = uniquedict.setdefault(word, 0) + 1
return uniquedict
print count_unique("each word weird word word appears once")
def common_items(list1, list2):
"""Produce the set of common items in two lists.
Given two lists, return a list of the common items shared between
the lists.
    IMPORTANT: you may not use 'if ___ in ___' or the method 'index'.
For example:
>>> sorted(common_items([1, 2, 3, 4], [1, 2]))
[1, 2]
If an item appears more than once in both lists, return it each
time:
>>> sorted(common_items([1, 2, 3, 4], [1, 1, 2, 2]))
[1, 1, 2, 2]
(And the order of which has the multiples shouldn't matter, either):
>>> sorted(common_items([1, 1, 2, 2], [1, 2, 3, 4]))
[1, 1, 2, 2]
"""
    # Count occurrences in each list separately; a shared item is kept
    # max(count1, count2) times, so the result is the same whichever list
    # holds the duplicates (see the doctests above).
    counts1 = {}
    for item in list1:
        counts1[item] = counts1.get(item, 0) + 1
    counts2 = {}
    for item in list2:
        counts2[item] = counts2.get(item, 0) + 1
    common = []
    for item, count1 in counts1.items():
        count2 = counts2.get(item, 0)
        if count2:
            common.extend([item] * max(count1, count2))
    return common
def unique_common_items(list1, list2):
"""Produce the set of *unique* common items in two lists.
Given two lists, return a list of the *unique* common items shared between
the lists.
    IMPORTANT: you may not use 'if ___ in ___' or the method 'index'.
Just like `common_items`, this should find [1, 2]:
>>> sorted(unique_common_items([1, 2, 3, 4], [1, 2]))
[1, 2]
However, now we only want unique items, so for these lists, don't show
more than 1 or 2 once:
>>> sorted(unique_common_items([1, 2, 3, 4], [1, 1, 2, 2]))
[1, 2]
"""
    # Count each list separately so an item duplicated in only one list is
    # not mistaken for a common item.
    counts1 = {}
    for i in list1:
        counts1[i] = counts1.setdefault(i, 0) + 1
    counts2 = {}
    for i in list2:
        counts2[i] = counts2.setdefault(i, 0) + 1
    newlist = []
    for key in counts1:
        if counts2.get(key):
            newlist.append(key)
    return newlist
print unique_common_items([1, 2, 3, 4], [1, 2, 2, 3, 7, 8, 9])
def sum_zero(list1):
"""Return list of x,y number pair lists from a list where x+y==0
Given a list of numbers, add up each individual pair of numbers.
Return a list of each pair of numbers that adds up to 0.
For example:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1]) )
[[-2, 2], [-1, 1]]
This should always be a unique list, even if there are
duplicates in the input list:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1, 1, 1]) )
[[-2, 2], [-1, 1]]
Of course, if there are one or more zeros to pair together,
that's fine, too:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1, 1, 0, 1, 0]) )
[[-2, 2], [-1, 1], [0, 0]]
"""
    values = set(list1)
    pairs = []
    used = set()
    for number in values:
        if number in used:
            continue
        if number == 0:
            # zero only pairs with itself, and only if it appears at least twice
            if list1.count(0) >= 2:
                pairs.append([0, 0])
            used.add(0)
        elif -number in values:
            pairs.append([number, -number])
            used.add(number)
            used.add(-number)
    return pairs
def find_duplicates(words):
"""Given a list of words, return the list with duplicates removed.
For example:
>>> sorted(find_duplicates(
... ["rose", "is", "a", "rose", "is", "a", "rose"]))
['a', 'is', 'rose']
You should treat differently-capitalized words as different:
>>> sorted(find_duplicates(
... ["Rose", "is", "a", "rose", "is", "a", "rose"]))
['Rose', 'a', 'is', 'rose']
"""
listdict = {}
newlist = []
for i in words:
listdict[i] = listdict.setdefault(i, 0) + 1
    for key in listdict:
        newlist.append(key)
return newlist
def word_length(words):
"""Given list of words, return list of ascending [(len, [words])].
Given a list of words, return a list of tuples, ordered by word-length.
Each tuple should have two items--the length of the words for that
word-length, and the list of words of that word length.
For example:
>>> word_length(["ok", "an", "apple", "a", "day"])
[(1, ['a']), (2, ['ok', 'an']), (3, ['day']), (5, ['apple'])]
"""
    lengths = {}
    for word in words:
        lengths.setdefault(len(word), []).append(word)
    return sorted(lengths.items())
def pirate_talk(phrase):
"""Translate phrase to pirate talk.
Given a phrase, translate each word to the Pirate-speak equivalent.
Words that cannot be translated into Pirate-speak should pass through
unchanged. Return the resulting sentence.
Here's a table of English to Pirate translations:
English Pirate
---------- ----------------
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
the th'
restroom head
my me
hello avast
is be
man matey
For example:
>>> pirate_talk("my student is not a man")
'me swabbie be not a matey'
You should treat words with punctuation as if they were different
words:
>>> pirate_talk("my student is not a man!")
'me swabbie be not a man!'
"""
return ""
def adv_word_length_sorted_words(words):
"""Given list of words, return list of ascending [(len, [sorted-words])].
Given a list of words, return a list of tuples, ordered by word-length.
Each tuple should have two items--the length of the words for that
word-length, and the list of words of that word length. The list of words
for that length should be sorted alphabetically.
For example:
>>> adv_word_length_sorted_words(["ok", "an", "apple", "a", "day"])
[(1, ['a']), (2, ['an', 'ok']), (3, ['day']), (5, ['apple'])]
"""
    lengths = {}
    for word in words:
        lengths.setdefault(len(word), []).append(word)
    return sorted((length, sorted(grouped)) for length, grouped in lengths.items())
##############################################################################
# You can ignore everything after here
def print_dict(d):
# This method is just used to print dictionaries in key-alphabetical
# order, and is only used for our documentation tests. You can ignore it.
if isinstance(d, dict):
print "{" + ", ".join("%r: %r" % (k, d[k]) for k in sorted(d)) + "}"
else:
print d
def sort_pairs(l):
# Print sorted list of pairs where the pairs are sorted. This is used only
# for documentation tests. You can ignore it.
return sorted(sorted(pair) for pair in l)
if __name__ == "__main__":
print
import doctest
for k, v in globals().items():
if k[0].isalpha():
if k.startswith('adv_') and not ADVANCED:
continue
a = doctest.run_docstring_examples(v, globals(), name=k)
print "** END OF TEST OUTPUT"
print
| [
"[email protected]"
] | |
93f309b526e26efd52fc955c3c44e08647392b4b | 7e3a2c989b81f425080c36f60ca0dca1e6a804ab | /cardpay/model/payment_callback.py | 43489e213e1613874a9b69b802907fb6fb4113cb | [
"MIT"
] | permissive | cardpay/python-sdk-v3 | 2a7c6ab4d8cfe0de2e9e362624ba4df49cd20445 | db1f250f0f6083497a468eeef0c537716a5e9fa8 | refs/heads/master | 2023-09-01T14:05:30.241622 | 2023-09-01T06:02:51 | 2023-09-01T06:02:51 | 197,133,457 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 11,888 | py | # coding: utf-8
"""
CardPay REST API
Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see changelog section. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from cardpay.model.payment_request_customer import (
PaymentRequestCustomer,
) # noqa: F401,E501
from cardpay.model.payment_response_card_account import (
PaymentResponseCardAccount,
) # noqa: F401,E501
from cardpay.model.payment_response_cryptocurrency_account import (
PaymentResponseCryptocurrencyAccount,
) # noqa: F401,E501
from cardpay.model.payment_response_payment_data import (
PaymentResponsePaymentData,
) # noqa: F401,E501
from cardpay.model.transaction_response_e_wallet_account import (
TransactionResponseEWalletAccount,
) # noqa: F401,E501
from cardpay.model.transaction_response_merchant_order import (
TransactionResponseMerchantOrder,
) # noqa: F401,E501
class PaymentCallback(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"callback_time": "str",
"card_account": "PaymentResponseCardAccount",
"cryptocurrency_account": "PaymentResponseCryptocurrencyAccount",
"customer": "PaymentRequestCustomer",
"ewallet_account": "TransactionResponseEWalletAccount",
"merchant_order": "TransactionResponseMerchantOrder",
"payment_data": "PaymentResponsePaymentData",
"payment_method": "str",
}
attribute_map = {
"callback_time": "callback_time",
"card_account": "card_account",
"cryptocurrency_account": "cryptocurrency_account",
"customer": "customer",
"ewallet_account": "ewallet_account",
"merchant_order": "merchant_order",
"payment_data": "payment_data",
"payment_method": "payment_method",
}
def __init__(
self,
callback_time=None,
card_account=None,
cryptocurrency_account=None,
customer=None,
ewallet_account=None,
merchant_order=None,
payment_data=None,
payment_method=None,
): # noqa: E501
"""PaymentCallback - a model defined in Swagger""" # noqa: E501
self._callback_time = None
self._card_account = None
self._cryptocurrency_account = None
self._customer = None
self._ewallet_account = None
self._merchant_order = None
self._payment_data = None
self._payment_method = None
self.discriminator = None
if callback_time is not None:
self.callback_time = callback_time
if card_account is not None:
self.card_account = card_account
if cryptocurrency_account is not None:
self.cryptocurrency_account = cryptocurrency_account
if customer is not None:
self.customer = customer
if ewallet_account is not None:
self.ewallet_account = ewallet_account
if merchant_order is not None:
self.merchant_order = merchant_order
if payment_data is not None:
self.payment_data = payment_data
if payment_method is not None:
self.payment_method = payment_method
@property
def callback_time(self):
"""Gets the callback_time of this PaymentCallback. # noqa: E501
Date and time of created callback in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format # noqa: E501
:return: The callback_time of this PaymentCallback. # noqa: E501
:rtype: str
"""
return self._callback_time
@callback_time.setter
def callback_time(self, callback_time):
"""Sets the callback_time of this PaymentCallback.
Date and time of created callback in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format # noqa: E501
:param callback_time: The callback_time of this PaymentCallback. # noqa: E501
:type: str
"""
self._callback_time = callback_time
@property
def card_account(self):
"""Gets the card_account of this PaymentCallback. # noqa: E501
Card account data *(for BANKCARD payment method only)* # noqa: E501
:return: The card_account of this PaymentCallback. # noqa: E501
:rtype: PaymentResponseCardAccount
"""
return self._card_account
@card_account.setter
def card_account(self, card_account):
"""Sets the card_account of this PaymentCallback.
Card account data *(for BANKCARD payment method only)* # noqa: E501
:param card_account: The card_account of this PaymentCallback. # noqa: E501
:type: PaymentResponseCardAccount
"""
self._card_account = card_account
@property
def cryptocurrency_account(self):
"""Gets the cryptocurrency_account of this PaymentCallback. # noqa: E501
Cryptocurrency account data *(for BITCOIN payment method only)* # noqa: E501
:return: The cryptocurrency_account of this PaymentCallback. # noqa: E501
:rtype: PaymentResponseCryptocurrencyAccount
"""
return self._cryptocurrency_account
@cryptocurrency_account.setter
def cryptocurrency_account(self, cryptocurrency_account):
"""Sets the cryptocurrency_account of this PaymentCallback.
Cryptocurrency account data *(for BITCOIN payment method only)* # noqa: E501
:param cryptocurrency_account: The cryptocurrency_account of this PaymentCallback. # noqa: E501
:type: PaymentResponseCryptocurrencyAccount
"""
self._cryptocurrency_account = cryptocurrency_account
@property
def customer(self):
"""Gets the customer of this PaymentCallback. # noqa: E501
Customer data # noqa: E501
:return: The customer of this PaymentCallback. # noqa: E501
:rtype: PaymentRequestCustomer
"""
return self._customer
@customer.setter
def customer(self, customer):
"""Sets the customer of this PaymentCallback.
Customer data # noqa: E501
:param customer: The customer of this PaymentCallback. # noqa: E501
:type: PaymentRequestCustomer
"""
self._customer = customer
@property
def ewallet_account(self):
"""Gets the ewallet_account of this PaymentCallback. # noqa: E501
eWallet account data *(for ALIPAY, QIWI, WEBMONEY, NETELLER, YANDEXMONEY, DIRECTBANKINGNGA, AQRCODE, AIRTEL, MPESA, MTN, UGANDAMOBILE, VODAFONE, TIGO and 'Latin America' payment methods only)* # noqa: E501
:return: The ewallet_account of this PaymentCallback. # noqa: E501
:rtype: TransactionResponseEWalletAccount
"""
return self._ewallet_account
@ewallet_account.setter
def ewallet_account(self, ewallet_account):
"""Sets the ewallet_account of this PaymentCallback.
eWallet account data *(for ALIPAY, QIWI, WEBMONEY, NETELLER, YANDEXMONEY, DIRECTBANKINGNGA, AQRCODE, AIRTEL, MPESA, MTN, UGANDAMOBILE, VODAFONE, TIGO and 'Latin America' payment methods only)* # noqa: E501
:param ewallet_account: The ewallet_account of this PaymentCallback. # noqa: E501
:type: TransactionResponseEWalletAccount
"""
self._ewallet_account = ewallet_account
@property
def merchant_order(self):
"""Gets the merchant_order of this PaymentCallback. # noqa: E501
Merchant order data # noqa: E501
:return: The merchant_order of this PaymentCallback. # noqa: E501
:rtype: TransactionResponseMerchantOrder
"""
return self._merchant_order
@merchant_order.setter
def merchant_order(self, merchant_order):
"""Sets the merchant_order of this PaymentCallback.
Merchant order data # noqa: E501
:param merchant_order: The merchant_order of this PaymentCallback. # noqa: E501
:type: TransactionResponseMerchantOrder
"""
self._merchant_order = merchant_order
@property
def payment_data(self):
"""Gets the payment_data of this PaymentCallback. # noqa: E501
Payment data # noqa: E501
:return: The payment_data of this PaymentCallback. # noqa: E501
:rtype: PaymentResponsePaymentData
"""
return self._payment_data
@payment_data.setter
def payment_data(self, payment_data):
"""Sets the payment_data of this PaymentCallback.
Payment data # noqa: E501
:param payment_data: The payment_data of this PaymentCallback. # noqa: E501
:type: PaymentResponsePaymentData
"""
self._payment_data = payment_data
@property
def payment_method(self):
"""Gets the payment_method of this PaymentCallback. # noqa: E501
Used payment method type name from payment methods list # noqa: E501
:return: The payment_method of this PaymentCallback. # noqa: E501
:rtype: str
"""
return self._payment_method
@payment_method.setter
def payment_method(self, payment_method):
"""Sets the payment_method of this PaymentCallback.
Used payment method type name from payment methods list # noqa: E501
:param payment_method: The payment_method of this PaymentCallback. # noqa: E501
:type: str
"""
self._payment_method = payment_method
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
if value is not None:
result[attr] = value
if issubclass(PaymentCallback, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentCallback):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
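# Hedged usage sketch: a minimal round-trip through this model. Only the two
# plain string fields are populated here because the nested models (customer,
# payment_data, ...) live in their own modules; the values below are made-up
# examples, not real CardPay data.
if __name__ == "__main__":
    example_callback = PaymentCallback(
        callback_time="2021-01-01T12:00:00Z",  # made-up timestamp
        payment_method="BANKCARD",
    )
    print(example_callback.to_dict())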
| [
"[email protected]"
] | |
eef5b333c3116e66c2bb74ed66d69cdc15a106b0 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /LightGBM_sklearn_scipy_numpy/source/sklearn/feature_selection/from_model.py | 2502643453d797d2ecf32c119697a97f4ab76e5e | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 7,363 | py | # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import BaseEstimator, clone, MetaEstimatorMixin
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.metaestimators import if_delegate_has_method
def _get_feature_importances(estimator, norm_order=1):
"""Retrieve or aggregate feature importances from estimator"""
importances = getattr(estimator, "feature_importances_", None)
if importances is None and hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.linalg.norm(estimator.coef_, axis=0,
ord=norm_order)
elif importances is None:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
# determine default from estimator
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
# the natural default threshold is 0 when l1 penalty was used
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
class SelectFromModel(BaseEstimator, SelectorMixin, MetaEstimatorMixin):
"""Meta-transformer for selecting features based on importance weights.
.. versionadded:: 0.17
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
This can be both a fitted (if ``prefit`` is set to True)
or a non-fitted estimator. The estimator must have either a
``feature_importances_`` or ``coef_`` attribute after fitting.
threshold : string, float, optional default None
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the ``threshold`` value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None and if the
estimator has a parameter penalty set to l1, either explicitly
or implicitly (e.g, Lasso), the threshold used is 1e-5.
Otherwise, "mean" is used by default.
prefit : bool, default False
Whether a prefit model is expected to be passed into the constructor
directly or not. If True, ``transform`` must be called directly
and SelectFromModel cannot be used with ``cross_val_score``,
``GridSearchCV`` and similar utilities that clone the estimator.
Otherwise train the model using ``fit`` and then ``transform`` to do
feature selection.
norm_order : non-zero int, inf, -inf, default 1
Order of the norm used to filter the vectors of coefficients below
``threshold`` in the case where the ``coef_`` attribute of the
estimator is of dimension 2.
Attributes
----------
estimator_ : an estimator
The base estimator from which the transformer is built.
This is stored only when a non-fitted estimator is passed to the
        ``SelectFromModel``, i.e. when prefit is False.
threshold_ : float
The threshold value used for feature selection.
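    Examples
    --------
    A minimal usage sketch (the tiny data set below is made up for illustration,
    and an L1-penalised logistic regression is just one possible base estimator):
    >>> from sklearn.feature_selection import SelectFromModel
    >>> from sklearn.linear_model import LogisticRegression
    >>> X = [[0.87, -1.34, 0.31], [-2.79, -0.02, -0.85],
    ...      [-1.34, -0.48, -2.55], [1.92, 1.48, 0.65]]
    >>> y = [0, 1, 0, 1]
    >>> selector = SelectFromModel(LogisticRegression(penalty="l1")).fit(X, y)
    >>> selector.transform(X).shape[1] <= len(X[0])
    True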
"""
def __init__(self, estimator, threshold=None, prefit=False, norm_order=1):
self.estimator = estimator
self.threshold = threshold
self.prefit = prefit
self.norm_order = norm_order
def _get_support_mask(self):
# SelectFromModel can directly call on transform.
if self.prefit:
estimator = self.estimator
elif hasattr(self, 'estimator_'):
estimator = self.estimator_
else:
raise ValueError(
'Either fit SelectFromModel before transform or set "prefit='
'True" and pass a fitted estimator to the constructor.')
scores = _get_feature_importances(estimator, self.norm_order)
threshold = _calculate_threshold(estimator, scores, self.threshold)
return scores >= threshold
def fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
@property
def threshold_(self):
scores = _get_feature_importances(self.estimator_, self.norm_order)
return _calculate_threshold(self.estimator, scores, self.threshold)
@if_delegate_has_method('estimator')
def partial_fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
| [
"[email protected]"
] | |
a37c234ec1d0be68fbafc60a4a1427f320971ba1 | ff88a620c7437af9af946643cd65f06c99fe3601 | /IntermediateCodeAndTesting/OldTaskVersions/LearningStopAuction3_lastrun.py | 86ce178aa462740ffb8c23c4acfcbc6267b4b722 | [] | no_license | bissettp/TrainedInhibitionTask | c2f20dadbb0e440c4fcf2bd3c4d670a7416df93c | 82727bd3ffa101209a61f2ff4f057f8896522d5d | refs/heads/master | 2020-06-06T20:02:16.470092 | 2015-06-23T17:56:15 | 2015-06-23T17:56:15 | 34,129,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118,715 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.82.01), Thu Apr 2 12:39:07 2015
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'LearningTask' # from the Builder filename that created this script
expInfo = {'participant':'', 'gender (m/f)':'', 'age':'', 'session':03}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.WARNING)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(1440, 900), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
monitor='testMonitor', color='black', colorSpace='rgb',
blendMode='avg', useFBO=True,
)
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
frameDur = 1.0/round(expInfo['frameRate'])
else:
frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "StimSetup"
StimSetupClock = core.Clock()
from copy import deepcopy
colors = ['yellow', 'white', 'orange', 'magenta', 'green', 'gray', 'cyan', 'blue']
shapes = ['triangle', 'square', 'line', 'invertedtriangle', 'hexagon', 'diamond', 'cross', 'circle']
rewards = [0.50, 1.00, 2.00, 4.00] * 2
conditions = ['go', 'go', 'go', 'go', 'stop', 'stop', 'stop', 'stop']
trialDetailsList = []
learningTrialList = []
learningPracTrialList = []
Redo = 1
shuffle(colors)
shuffle(shapes)
for i, color in enumerate(colors): # cycle through each color and keep track of an index number
trialDetails = {} # a dictionary of key-value pairs
trialDetails['fileName'] = shapes[i] + color + '.gif'
trialDetails['reward'] = rewards[i]
trialDetails['condition'] = conditions[i]
trialDetailsList.append(trialDetails)
shuffle(trialDetailsList) # do this now to ensure that order of presentation of rewards and conditions is also shuffled
conditionOne = trialDetailsList[0]
conditionTwo = trialDetailsList[1]
conditionThree = trialDetailsList[2]
conditionFour = trialDetailsList[3]
conditionFive = trialDetailsList[4]
conditionSix = trialDetailsList[5]
conditionSeven = trialDetailsList[6]
conditionEight = trialDetailsList[7]
numLearningTrials = 400
numLearningRepetitions = 50
for k in range(1, numLearningRepetitions + 1):
learningTrialList.append(conditionOne)
learningTrialList.append(conditionTwo)
learningTrialList.append(conditionThree)
learningTrialList.append(conditionFour)
learningTrialList.append(conditionFive)
learningTrialList.append(conditionSix)
learningTrialList.append(conditionSeven)
learningTrialList.append(conditionEight)
shuffle(learningTrialList)
learningPracTrialList.append(conditionOne)
learningPracTrialList.append(conditionTwo)
learningPracTrialList.append(conditionThree)
learningPracTrialList.append(conditionFour)
learningPracTrialList.append(conditionFive)
learningPracTrialList.append(conditionSix)
learningPracTrialList.append(conditionSeven)
learningPracTrialList.append(conditionEight)
learningPracTrialList.append(conditionOne)
learningPracTrialList.append(conditionTwo)
shuffle(learningPracTrialList)
stopTaskTrials = 288
stopTrialsPerStopStim = 27
goTrialsPerStopStim = 9
goTrialsPerGoStim = 36
numStim = 8
stopTrialList = []
stopPracTrialList = []
if conditionOne['condition'] == 'go':
conditionOne['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionOne))
stopPracTrialList.append(deepcopy(conditionOne))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionOne))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(conditionOne)
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionOne['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionOne))
stopPracTrialList.append(deepcopy(conditionOne))
for i in range(1, stopTrialsPerStopStim + 1):
conditionOne['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionOne))
stopPracTrialList.append(deepcopy(conditionOne))
if conditionTwo['condition'] == 'go':
conditionTwo['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionTwo))
stopPracTrialList.append(deepcopy(conditionTwo))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionTwo))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionTwo['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionTwo))
stopPracTrialList.append(deepcopy(conditionTwo))
for i in range(1, stopTrialsPerStopStim + 1):
conditionTwo['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionTwo))
stopPracTrialList.append(deepcopy(conditionTwo))
if conditionThree['condition'] == 'go':
conditionThree['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionThree))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionThree))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionThree['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionThree))
for i in range(1, stopTrialsPerStopStim + 1):
conditionThree['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionThree))
stopPracTrialList.append(deepcopy(conditionThree))
if conditionFour['condition'] == 'go':
conditionFour['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionFour))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionFour))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionFour['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionFour))
for i in range(1, stopTrialsPerStopStim + 1):
conditionFour['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionFour))
stopPracTrialList.append(deepcopy(conditionFour))
if conditionFive['condition'] == 'go':
conditionFive['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionFive))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionFive))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionFive['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionFive))
for i in range(1, stopTrialsPerStopStim + 1):
conditionFive['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionFive))
stopPracTrialList.append(deepcopy(conditionFive))
if conditionSix['condition'] == 'go':
conditionSix['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionSix))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionSix))
else:
for j in range(1, stopTrialsPerStopStim + 1):
conditionSix['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionSix))
for i in range(1, goTrialsPerStopStim + 1):
conditionSix['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionSix))
stopPracTrialList.append(deepcopy(conditionSix))
if conditionSeven['condition'] == 'go':
conditionSeven['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionSeven))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionSeven))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionSeven['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionSeven))
for i in range(1, stopTrialsPerStopStim + 1):
conditionSeven['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionSeven))
stopPracTrialList.append(deepcopy(conditionSeven))
if conditionEight['condition'] == 'go':
conditionEight['stopOrGo'] = 'go'
stopPracTrialList.append(deepcopy(conditionEight))
for i in range(1, goTrialsPerGoStim + 1):
stopTrialList.append(deepcopy(conditionEight))
else:
for j in range(1, goTrialsPerStopStim + 1):
conditionEight['stopOrGo'] = 'go'
stopTrialList.append(deepcopy(conditionEight))
for i in range(1, stopTrialsPerStopStim + 1):
conditionEight['stopOrGo'] = 'stop'
stopTrialList.append(deepcopy(conditionEight))
stopPracTrialList.append(deepcopy(conditionEight))
shuffle(stopTrialList)
shuffle(stopPracTrialList)
# Initialize components for Routine "instrPractice"
instrPracticeClock = core.Clock()
instruct1 = visual.TextStim(win=win, ori=0, name='instruct1',
text='A shape stimulus will appear on every trial. \n\nIf it appears in the upper right quadrant, PRESS W\n\nIf it appears in the lower right quadrant, PRESS S\n\nIf it appears in the lower left quadrant, PRESS A\n\nIf it appears in the upper left quadrant, PRESS Q\n\nResponding as fast and as accurately as possible will lead to higher rewards. \n\nPress any key when you are ready to proceed. ', font='Arial',
pos=[0, 0], height=0.07, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "NewPracStim"
NewPracStimClock = core.Clock()
# Initialize components for Routine "trial"
trialClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
text='+', font='Arial',
pos=[0, 0], height=1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
image_2 = visual.ImageStim(win=win, name='image_2',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "feedback"
feedbackClock = core.Clock()
#message variable just needs some value at start
message=0
feedback_2 = visual.TextStim(win=win, ori=0, name='feedback_2',
text='default text', font='Arial',
pos=[0, 0], height=.2, wrapWidth=None,
color=[1,1,1], colorSpace='rgb', opacity=1,
depth=-1.0)
image = visual.ImageStim(win=win, name='image',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "Blank"
BlankClock = core.Clock()
text_2 = visual.TextStim(win=win, ori=0, name='text_2',
text=None, font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "ExitPrac"
ExitPracClock = core.Clock()
# Initialize components for Routine "instrStopPrac"
instrStopPracClock = core.Clock()
instrStopText = visual.TextStim(win=win, ori=0, name='instrStopText',
text='A shape stimulus will appear on the left or right side of the screen\n\nIf it appears on the left, press Z\n\nIf it appears on the right, press M\n\nIf you hear a tone, do not press anything on that trial\n\nResponding quickly to the location of the shape and withholding your response when you hear a tone are equally important. ', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "newPracStopStim"
newPracStopStimClock = core.Clock()
# Initialize components for Routine "StopTrial"
StopTrialClock = core.Clock()
sound_1 = sound.Sound('900', secs=-1)
sound_1.setVolume(.2)
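# sound_1 is the auditory stop signal: '900' appears to be interpreted by
# PsychoPy's sound module as a 900 Hz tone, played at 20% volume; secs=-1 leaves
# the duration open-ended, and the StopTrial routine stops the tone explicitly
# about 0.2 s after it starts.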
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
text='|', font='Arial',
pos=[0, 0], height=4, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
image_3 = visual.ImageStim(win=win, name='image_3',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "blankStop"
blankStopClock = core.Clock()
text_4 = visual.TextStim(win=win, ori=0, name='text_4',
text=None, font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "pracStopCleanUp"
pracStopCleanUpClock = core.Clock()
# Initialize components for Routine "endOfStopBlockFeedback"
endOfStopBlockFeedbackClock = core.Clock()
text_5 = visual.TextStim(win=win, ori=0, name='text_5',
text='default text', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
# Initialize components for Routine "instrPractice"
instrPracticeClock = core.Clock()
instruct1 = visual.TextStim(win=win, ori=0, name='instruct1',
text='A shape stimulus will appear on every trial. \n\nIf it appears in the upper right quadrant, PRESS W\n\nIf it appears in the lower right quadrant, PRESS S\n\nIf it appears in the lower left quadrant, PRESS A\n\nIf it appears in the upper left quadrant, PRESS Q\n\nResponding as fast and as accurately as possible will lead to higher rewards. \n\nPress any key when you are ready to proceed. ', font='Arial',
pos=[0, 0], height=0.07, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "ResetAtBlock"
ResetAtBlockClock = core.Clock()
# Initialize components for Routine "NewStim"
NewStimClock = core.Clock()
# Initialize components for Routine "trial"
trialClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
text='+', font='Arial',
pos=[0, 0], height=1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
image_2 = visual.ImageStim(win=win, name='image_2',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "feedback"
feedbackClock = core.Clock()
#message variable just needs some value at start
message=0
feedback_2 = visual.TextStim(win=win, ori=0, name='feedback_2',
text='default text', font='Arial',
pos=[0, 0], height=.2, wrapWidth=None,
color=[1,1,1], colorSpace='rgb', opacity=1,
depth=-1.0)
image = visual.ImageStim(win=win, name='image',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "Blank"
BlankClock = core.Clock()
text_2 = visual.TextStim(win=win, ori=0, name='text_2',
text=None, font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "ExitMain"
ExitMainClock = core.Clock()
# Initialize components for Routine "instrStopPrac"
instrStopPracClock = core.Clock()
instrStopText = visual.TextStim(win=win, ori=0, name='instrStopText',
text='A shape stimulus will appear on the left or right side of the screen\n\nIf it appears on the left, press Z\n\nIf it appears on the right, press M\n\nIf you hear a tone, do not press anything on that trial\n\nResponding quickly to the location of the shape and withholding your response when you hear a tone are equally important. ', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "stopBlockSetup"
stopBlockSetupClock = core.Clock()
goCumRT = 0
goRTCount = 0
omissionCount = 0
commissionCount = 0
stopTrialCount = 0
stopSuccessCount = 0
goTrialCount = 0
# Initialize components for Routine "newStopStim"
newStopStimClock = core.Clock()
# Initialize components for Routine "StopTrial"
StopTrialClock = core.Clock()
sound_1 = sound.Sound('900', secs=-1)
sound_1.setVolume(.2)
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
text='|', font='Arial',
pos=[0, 0], height=4, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
image_3 = visual.ImageStim(win=win, name='image_3',units='pix',
image='sin', mask=None,
ori=0, pos=[0,0], size=[151, 151],
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "blankStop"
blankStopClock = core.Clock()
text_4 = visual.TextStim(win=win, ori=0, name='text_4',
text=None, font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "SSDChange"
SSDChangeClock = core.Clock()
# Initialize components for Routine "endOfStopBlockFeedback"
endOfStopBlockFeedbackClock = core.Clock()
text_5 = visual.TextStim(win=win, ori=0, name='text_5',
text='default text', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0)
# Initialize components for Routine "End"
EndClock = core.Clock()
text_6 = visual.TextStim(win=win, ori=0, name='text_6',
text='This is the end, beautiful friend', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
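# Every routine below follows the same Builder-generated pattern:
#   1. "Prepare": reset the routine clock and frameN, create/reset components,
#      and (for fixed-duration routines) add the duration to routineTimer.
#   2. "Start": a while loop that runs once per screen refresh, starting/stopping
#      each component at its scheduled time, checking for escape, and calling
#      win.flip() until continueRoutine goes False (or routineTimer runs out).
#   3. "Ending": switch off autoDraw for all components and store any responses.
# Routines without a fixed duration are "not non-slip safe", so routineTimer is
# reset after them rather than relied on for timing.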
#------Prepare to start Routine "StimSetup"-------
t = 0
StimSetupClock.reset() # clock
frameN = -1
# update component parameters for each repeat
# keep track of which components have finished
StimSetupComponents = []
for thisComponent in StimSetupComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "StimSetup"-------
continueRoutine = True
while continueRoutine:
# get current time
t = StimSetupClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in StimSetupComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "StimSetup"-------
for thisComponent in StimSetupComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "StimSetup" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "instrPractice"-------
t = 0
instrPracticeClock.reset() # clock
frameN = -1
# update component parameters for each repeat
ok1 = event.BuilderKeyResponse() # create an object of type KeyResponse
ok1.status = NOT_STARTED
# keep track of which components have finished
instrPracticeComponents = []
instrPracticeComponents.append(instruct1)
instrPracticeComponents.append(ok1)
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instrPractice"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrPracticeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instruct1* updates
if t >= 0.0 and instruct1.status == NOT_STARTED:
# keep track of start time/frame for later
instruct1.tStart = t # underestimates by a little under one frame
instruct1.frameNStart = frameN # exact frame index
instruct1.setAutoDraw(True)
# *ok1* updates
if t >= 0.0 and ok1.status == NOT_STARTED:
# keep track of start time/frame for later
ok1.tStart = t # underestimates by a little under one frame
ok1.frameNStart = frameN # exact frame index
ok1.status = STARTED
# keyboard checking is just starting
ok1.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if ok1.status == STARTED:
theseKeys = event.getKeys()
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
ok1.keys = theseKeys[-1] # just the last key pressed
ok1.rt = ok1.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "instrPractice"-------
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if ok1.keys in ['', [], None]: # No response was made
ok1.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('ok1.keys',ok1.keys)
if ok1.keys != None: # we had a response
thisExp.addData('ok1.rt', ok1.rt)
thisExp.nextEntry()
# the Routine "instrPractice" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
pracTrials = data.TrialHandler(nReps=1, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=data.importConditions('TrialtypesLearningPrac.xlsx'),
seed=None, name='pracTrials')
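# data.TrialHandler drives the practice loop: it draws rows from
# TrialtypesLearningPrac.xlsx (one row per trial type), repeats the full set
# nReps=1 time, and 'fullRandom' shuffles across the whole sequence rather than
# within each repeat. Registering it with thisExp routes its data into the
# saved output files.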
thisExp.addLoop(pracTrials) # add the loop to the experiment
thisPracTrial = pracTrials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisPracTrial.rgb)
if thisPracTrial != None:
for paramName in thisPracTrial.keys():
exec(paramName + '= thisPracTrial.' + paramName)
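# The exec() above turns every column of the conditions file into a local
# variable, e.g. a column named 'corrAns' becomes a variable corrAns that the
# routines below can read directly (equivalent to writing
# corrAns = thisPracTrial.corrAns by hand for each column).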
for thisPracTrial in pracTrials:
currentLoop = pracTrials
# abbreviate parameter names if possible (e.g. rgb = thisPracTrial.rgb)
if thisPracTrial != None:
for paramName in thisPracTrial.keys():
exec(paramName + '= thisPracTrial.' + paramName)
#------Prepare to start Routine "NewPracStim"-------
t = 0
NewPracStimClock.reset() # clock
frameN = -1
# update component parameters for each repeat
currentLearningPracTrial = learningPracTrialList.pop(0)
currentStimulus = currentLearningPracTrial['fileName']
currentReward = currentLearningPracTrial['reward']
# keep track of which components have finished
NewPracStimComponents = []
for thisComponent in NewPracStimComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "NewPracStim"-------
continueRoutine = True
while continueRoutine:
# get current time
t = NewPracStimClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in NewPracStimComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "NewPracStim"-------
for thisComponent in NewPracStimComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "NewPracStim" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
ReDoLoopPrac = data.TrialHandler(nReps=999, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=[None],
seed=None, name='ReDoLoopPrac')
thisExp.addLoop(ReDoLoopPrac) # add the loop to the experiment
thisReDoLoopPrac = ReDoLoopPrac.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisReDoLoopPrac.rgb)
if thisReDoLoopPrac != None:
for paramName in thisReDoLoopPrac.keys():
exec(paramName + '= thisReDoLoopPrac.' + paramName)
for thisReDoLoopPrac in ReDoLoopPrac:
currentLoop = ReDoLoopPrac
# abbreviate parameter names if possible (e.g. rgb = thisReDoLoopPrac.rgb)
if thisReDoLoopPrac != None:
for paramName in thisReDoLoopPrac.keys():
exec(paramName + '= thisReDoLoopPrac.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
routineTimer.add(1.500000)
# update component parameters for each repeat
resp = event.BuilderKeyResponse() # create an object of type KeyResponse
resp.status = NOT_STARTED
image_2.setPos([xPos, yPos])
image_2.setImage(currentStimulus)
# keep track of which components have finished
trialComponents = []
trialComponents.append(resp)
trialComponents.append(text)
trialComponents.append(image_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *resp* updates
if t >= .5 and resp.status == NOT_STARTED:
# keep track of start time/frame for later
resp.tStart = t # underestimates by a little under one frame
resp.frameNStart = frameN # exact frame index
resp.status = STARTED
# keyboard checking is just starting
resp.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if resp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
resp.status = STOPPED
if resp.status == STARTED:
theseKeys = event.getKeys(keyList=['q', 'w', 's', 'a'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
resp.keys = theseKeys[-1] # just the last key pressed
resp.rt = resp.clock.getTime()
# was this 'correct'?
if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
resp.corr = 1
else:
resp.corr = 0
# a response ends the routine
continueRoutine = False
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *image_2* updates
if t >= .5 and image_2.status == NOT_STARTED:
# keep track of start time/frame for later
image_2.tStart = t # underestimates by a little under one frame
image_2.frameNStart = frameN # exact frame index
image_2.setAutoDraw(True)
if image_2.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if resp.keys in ['', [], None]: # No response was made
resp.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none': resp.corr = 1 # correct non-response
else: resp.corr = 0 # failed to respond (incorrectly)
# store data for ReDoLoopPrac (TrialHandler)
ReDoLoopPrac.addData('resp.keys',resp.keys)
ReDoLoopPrac.addData('resp.corr', resp.corr)
if resp.keys != None: # we had a response
ReDoLoopPrac.addData('resp.rt', resp.rt)
#------Prepare to start Routine "feedback"-------
t = 0
feedbackClock.reset() # clock
frameN = -1
routineTimer.add(1.000000)
# update component parameters for each repeat
import random
displayReward = random.randrange(1, 6)
computedReward = round(currentReward + (random.randrange(-25, 26)*.01), 2)
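        # Feedback amount: computedReward jitters the stimulus' base reward by a
        # random number of cents between -25 and +25; displayReward equals 1 with
        # probability 1/5, in which case a correct trial shows "$0.00" instead.
        # (The 'import random' inside the loop is harmless but only needs to run once.)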
if resp.corr:#stored on last run routine
if displayReward == 1:
message = "You won $0.00"
else:
message = "You won $ %.2f" %computedReward
elif resp.keys is None: #or len(key_resp.keys)<1:
#elif resp.rt == 0:
message ="Too Slow"
else:
message="Wrong"
            msg="Correct! RT=%.3f" %(resp.rt)  # note: 'msg' appears unused (only 'message' is displayed); likely leftover from Builder's feedback demo
feedback_2.setText(message)
image.setPos([xPos, yPos])
image.setImage(currentStimulus)
# keep track of which components have finished
feedbackComponents = []
feedbackComponents.append(feedback_2)
feedbackComponents.append(image)
for thisComponent in feedbackComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "feedback"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedbackClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *feedback_2* updates
if t >= 0.0 and feedback_2.status == NOT_STARTED:
# keep track of start time/frame for later
feedback_2.tStart = t # underestimates by a little under one frame
feedback_2.frameNStart = frameN # exact frame index
feedback_2.setAutoDraw(True)
if feedback_2.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
feedback_2.setAutoDraw(False)
# *image* updates
if t >= 0.0 and image.status == NOT_STARTED:
# keep track of start time/frame for later
image.tStart = t # underestimates by a little under one frame
image.frameNStart = frameN # exact frame index
image.setAutoDraw(True)
if image.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "feedback"-------
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "Blank"-------
t = 0
BlankClock.reset() # clock
frameN = -1
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BlankComponents = []
BlankComponents.append(text_2)
for thisComponent in BlankComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "Blank"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = BlankClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if t >= 0 and text_2.status == NOT_STARTED:
# keep track of start time/frame for later
text_2.tStart = t # underestimates by a little under one frame
text_2.frameNStart = frameN # exact frame index
text_2.setAutoDraw(True)
if text_2.status == STARTED and t >= (0 + (.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in BlankComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "Blank"-------
for thisComponent in BlankComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "ExitPrac"-------
t = 0
ExitPracClock.reset() # clock
frameN = -1
# update component parameters for each repeat
# keep track of which components have finished
ExitPracComponents = []
for thisComponent in ExitPracComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "ExitPrac"-------
continueRoutine = True
while continueRoutine:
# get current time
t = ExitPracClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ExitPracComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "ExitPrac"-------
for thisComponent in ExitPracComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if resp.corr:
ReDoLoopPrac.finished = True
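        # ReDoLoopPrac is a 999-repeat "redo" loop around a single practice item:
        # setting .finished makes this pass the last one, so the same stimulus and
        # position repeat until the participant responds correctly, and only then
        # does the outer pracTrials loop move on to the next item.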
# the Routine "ExitPrac" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# completed 999 repeats of 'ReDoLoopPrac'
thisExp.nextEntry()
# completed 1 repeats of 'pracTrials'
# get names of stimulus parameters
if pracTrials.trialList in ([], [None], None): params = []
else: params = pracTrials.trialList[0].keys()
# save data for this loop
pracTrials.saveAsExcel(filename + '.xlsx', sheetName='pracTrials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "instrStopPrac"-------
t = 0
instrStopPracClock.reset() # clock
frameN = -1
# update component parameters for each repeat
key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_2.status = NOT_STARTED
InitSSD = .25
SSDLeft = InitSSD
SSDRight = InitSSD
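# InitSSD is the starting stop-signal delay (0.25 s, i.e. 250 ms). SSDLeft and
# SSDRight track the delay separately for left- and right-hand responses; the
# practice block itself uses a fixed SSD (set in newPracStopStim below) and these
# two values are averaged for the block feedback. The main task includes an
# SSDChange routine (initialized further below), which presumably adapts them.
# As an illustration only, NOT taken from this script, a one-up/one-down
# staircase update would look like:
#     if stoppedSuccessfully:
#         SSDLeft = SSDLeft + .05               # stopping succeeded: make it harder
#     else:
#         SSDLeft = max(0, SSDLeft - .05)       # stopping failed: make it easier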
# keep track of which components have finished
instrStopPracComponents = []
instrStopPracComponents.append(instrStopText)
instrStopPracComponents.append(key_resp_2)
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instrStopPrac"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrStopPracClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instrStopText* updates
if t >= 0.0 and instrStopText.status == NOT_STARTED:
# keep track of start time/frame for later
instrStopText.tStart = t # underestimates by a little under one frame
instrStopText.frameNStart = frameN # exact frame index
instrStopText.setAutoDraw(True)
# *key_resp_2* updates
if t >= 0.0 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t # underestimates by a little under one frame
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED:
theseKeys = event.getKeys()
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "instrStopPrac"-------
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "instrStopPrac" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
pracStopTrials = data.TrialHandler(nReps=5, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=data.importConditions('trialtypeStop.xlsx'),
seed=None, name='pracStopTrials')
thisExp.addLoop(pracStopTrials) # add the loop to the experiment
thisPracStopTrial = pracStopTrials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisPracStopTrial.rgb)
if thisPracStopTrial != None:
for paramName in thisPracStopTrial.keys():
exec(paramName + '= thisPracStopTrial.' + paramName)
for thisPracStopTrial in pracStopTrials:
currentLoop = pracStopTrials
# abbreviate parameter names if possible (e.g. rgb = thisPracStopTrial.rgb)
if thisPracStopTrial != None:
for paramName in thisPracStopTrial.keys():
exec(paramName + '= thisPracStopTrial.' + paramName)
#------Prepare to start Routine "newPracStopStim"-------
t = 0
newPracStopStimClock.reset() # clock
frameN = -1
# update component parameters for each repeat
currentStopPracTrial = stopPracTrialList.pop(0)
currentGoStim = currentStopPracTrial['fileName']
currentStopOrGo = currentStopPracTrial['stopOrGo']
if currentStopOrGo == 'stop':
SSD = .25
elif currentStopOrGo == 'go':
SSD = -1
pracStopTrials.addData("beginningSSD", SSD)
pracStopTrials.addData("trialType", currentStopOrGo)
pracStopTrials.addData("goStim", currentGoStim)
SSDInput = SSD + .5
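    # SSDInput converts the stop-signal delay into time on the StopTrial clock:
    # the go stimulus appears 0.5 s into the routine, so the tone is scheduled
    # SSD seconds after go-stimulus onset. Note that on go trials SSD is -1,
    # making SSDInput -0.5, so the "t >= SSDInput" check is already true on the
    # first frame; whether the tone should really play at fixation onset on go
    # trials is worth checking against the intended design.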
# keep track of which components have finished
newPracStopStimComponents = []
for thisComponent in newPracStopStimComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "newPracStopStim"-------
continueRoutine = True
while continueRoutine:
# get current time
t = newPracStopStimClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in newPracStopStimComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "newPracStopStim"-------
for thisComponent in newPracStopStimComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "newPracStopStim" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "StopTrial"-------
t = 0
StopTrialClock.reset() # clock
frameN = -1
# update component parameters for each repeat
image_3.setPos([xPosGoStim, yPosGoStim])
image_3.setImage(currentGoStim)
goResp = event.BuilderKeyResponse() # create an object of type KeyResponse
goResp.status = NOT_STARTED
# keep track of which components have finished
StopTrialComponents = []
StopTrialComponents.append(sound_1)
StopTrialComponents.append(text_3)
StopTrialComponents.append(image_3)
StopTrialComponents.append(goResp)
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "StopTrial"-------
continueRoutine = True
while continueRoutine:
# get current time
t = StopTrialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# start/stop sound_1
if t >= SSDInput and sound_1.status == NOT_STARTED:
# keep track of start time/frame for later
sound_1.tStart = t # underestimates by a little under one frame
sound_1.frameNStart = frameN # exact frame index
sound_1.play() # start the sound (it finishes automatically)
if sound_1.status == STARTED and t >= (SSDInput + (.2-win.monitorFramePeriod*0.75)): #most of one frame period left
sound_1.stop() # stop the sound (if longer than duration)
# *text_3* updates
if t >= 0.0 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t # underestimates by a little under one frame
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
if text_3.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_3.setAutoDraw(False)
# *image_3* updates
if t >= .5 and image_3.status == NOT_STARTED:
# keep track of start time/frame for later
image_3.tStart = t # underestimates by a little under one frame
image_3.frameNStart = frameN # exact frame index
image_3.setAutoDraw(True)
if image_3.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image_3.setAutoDraw(False)
# *goResp* updates
if t >= .5 and goResp.status == NOT_STARTED:
# keep track of start time/frame for later
goResp.tStart = t # underestimates by a little under one frame
goResp.frameNStart = frameN # exact frame index
goResp.status = STARTED
# keyboard checking is just starting
goResp.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if goResp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
goResp.status = STOPPED
if goResp.status == STARTED:
theseKeys = event.getKeys(keyList=['z', 'm'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
if goResp.keys == []: # then this was the first keypress
goResp.keys = theseKeys[0] # just the first key pressed
goResp.rt = goResp.clock.getTime()
# was this 'correct'?
if (goResp.keys == str(corrGoResp)) or (goResp.keys == corrGoResp):
goResp.corr = 1
else:
goResp.corr = 0
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "StopTrial"-------
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
sound_1.stop() #ensure sound has stopped at end of routine
# check responses
if goResp.keys in ['', [], None]: # No response was made
goResp.keys=None
# was no response the correct answer?!
if str(corrGoResp).lower() == 'none': goResp.corr = 1 # correct non-response
else: goResp.corr = 0 # failed to respond (incorrectly)
# store data for pracStopTrials (TrialHandler)
pracStopTrials.addData('goResp.keys',goResp.keys)
pracStopTrials.addData('goResp.corr', goResp.corr)
if goResp.keys != None: # we had a response
pracStopTrials.addData('goResp.rt', goResp.rt)
# the Routine "StopTrial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "blankStop"-------
t = 0
blankStopClock.reset() # clock
frameN = -1
routineTimer.add(1.500000)
# update component parameters for each repeat
# keep track of which components have finished
blankStopComponents = []
blankStopComponents.append(text_4)
for thisComponent in blankStopComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "blankStop"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = blankStopClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_4* updates
if t >= 0.0 and text_4.status == NOT_STARTED:
# keep track of start time/frame for later
text_4.tStart = t # underestimates by a little under one frame
text_4.frameNStart = frameN # exact frame index
text_4.setAutoDraw(True)
if text_4.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_4.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in blankStopComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "blankStop"-------
for thisComponent in blankStopComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "pracStopCleanUp"-------
t = 0
pracStopCleanUpClock.reset() # clock
frameN = -1
# update component parameters for each repeat
if currentStopPracTrial['stopOrGo'] == 'go':
goTrialCount = goTrialCount + 1
if goResp.corr and currentStopPracTrial['stopOrGo'] == 'go':
goCumRT = goCumRT + goResp.rt
goRTCount = goRTCount + 1
if currentStopPracTrial['stopOrGo'] == 'stop':
stopTrialCount = stopTrialCount + 1
if currentStopPracTrial['stopOrGo'] == 'stop' and goResp.keys is None:
stopSuccessCount = stopSuccessCount + 1
if currentStopPracTrial['stopOrGo'] == 'go':
if goResp.keys is None:
omissionCount = omissionCount + 1
elif goResp.corr == 0:
commissionCount = commissionCount + 1
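    # Running block statistics: omission = no keypress on a go trial,
    # commission = wrong key on a go trial, stop success = no keypress on a stop
    # trial. goCumRT/goRTCount accumulate correct go RTs for the mean-RT
    # feedback shown at the end of the practice stop block.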
    pracStopTrials.addData("goTrialCountOutput", goTrialCount)
pracStopTrials.addData("goCumRTOutput", goCumRT)
pracStopTrials.addData("goRTCountOutput", goRTCount)
pracStopTrials.addData("stopTrialCountOutput", stopTrialCount)
pracStopTrials.addData("stopSuccessCountOutput", stopSuccessCount)
pracStopTrials.addData("omissionCountOutput", omissionCount)
pracStopTrials.addData("commissionCountOutput", commissionCount)
# keep track of which components have finished
pracStopCleanUpComponents = []
for thisComponent in pracStopCleanUpComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "pracStopCleanUp"-------
continueRoutine = True
while continueRoutine:
# get current time
t = pracStopCleanUpClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in pracStopCleanUpComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "pracStopCleanUp"-------
for thisComponent in pracStopCleanUpComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "pracStopCleanUp" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 5 repeats of 'pracStopTrials'
# get names of stimulus parameters
if pracStopTrials.trialList in ([], [None], None): params = []
else: params = pracStopTrials.trialList[0].keys()
# save data for this loop
pracStopTrials.saveAsExcel(filename + '.xlsx', sheetName='pracStopTrials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "endOfStopBlockFeedback"-------
t = 0
endOfStopBlockFeedbackClock.reset() # clock
frameN = -1
# update component parameters for each repeat
if goRTCount > 0:
goRTFeedback = goCumRT/goRTCount
goRTFeedback = round(goRTFeedback, 2)
else:
goRTFeedback = 'Null'
if goTrialCount > 0:
    # float() guards against integer division (under Python 2) truncating these rates to 0
    commissionRate = float(commissionCount)/goTrialCount
    commissionRate = round(commissionRate, 2)
    omissionRate = float(omissionCount)/goTrialCount
    omissionRate = round(omissionRate, 2)
else:
commissionRate = 'Null'
omissionRate = 'Null'
if stopTrialCount > 0:
    probabilityOfStop = float(stopSuccessCount)/stopTrialCount
    probabilityOfStop = round(probabilityOfStop, 2)
else:
probabilityOfStop = 'Null'
SSDFeedback = (SSDLeft+SSDRight)/2
SSDFeedback = round(SSDFeedback, 2)
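# stopMessage shows, in order: mean correct go RT, omission rate, commission
# rate, and then (after the blank lines) the unlabeled proportion of successful
# stops and the average SSD across the two hands.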
stopMessage = " RT = " + str(goRTFeedback) + "\n Omission % = " + str(omissionRate) + "\n Commission % = " + str(commissionRate) + "\n\n\n\n " + str(probabilityOfStop) + "\n " + str(SSDFeedback)
text_5.setText(stopMessage)
key_resp_3 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_3.status = NOT_STARTED
# keep track of which components have finished
endOfStopBlockFeedbackComponents = []
endOfStopBlockFeedbackComponents.append(text_5)
endOfStopBlockFeedbackComponents.append(key_resp_3)
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "endOfStopBlockFeedback"-------
continueRoutine = True
while continueRoutine:
# get current time
t = endOfStopBlockFeedbackClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_5* updates
if t >= 0.0 and text_5.status == NOT_STARTED:
# keep track of start time/frame for later
text_5.tStart = t # underestimates by a little under one frame
text_5.frameNStart = frameN # exact frame index
text_5.setAutoDraw(True)
# *key_resp_3* updates
if t >= 0.0 and key_resp_3.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_3.tStart = t # underestimates by a little under one frame
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.status = STARTED
# keyboard checking is just starting
key_resp_3.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_3.status == STARTED:
theseKeys = event.getKeys(keyList=['y', 'n', 'left', 'right', 'space'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_3.keys = theseKeys[-1] # just the last key pressed
key_resp_3.rt = key_resp_3.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "endOfStopBlockFeedback"-------
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.nextEntry()
# the Routine "endOfStopBlockFeedback" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "instrPractice"-------
t = 0
instrPracticeClock.reset() # clock
frameN = -1
# update component parameters for each repeat
ok1 = event.BuilderKeyResponse() # create an object of type KeyResponse
ok1.status = NOT_STARTED
# keep track of which components have finished
instrPracticeComponents = []
instrPracticeComponents.append(instruct1)
instrPracticeComponents.append(ok1)
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instrPractice"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrPracticeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instruct1* updates
if t >= 0.0 and instruct1.status == NOT_STARTED:
# keep track of start time/frame for later
instruct1.tStart = t # underestimates by a little under one frame
instruct1.frameNStart = frameN # exact frame index
instruct1.setAutoDraw(True)
# *ok1* updates
if t >= 0.0 and ok1.status == NOT_STARTED:
# keep track of start time/frame for later
ok1.tStart = t # underestimates by a little under one frame
ok1.frameNStart = frameN # exact frame index
ok1.status = STARTED
# keyboard checking is just starting
ok1.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if ok1.status == STARTED:
theseKeys = event.getKeys()
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
ok1.keys = theseKeys[-1] # just the last key pressed
ok1.rt = ok1.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "instrPractice"-------
for thisComponent in instrPracticeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if ok1.keys in ['', [], None]: # No response was made
ok1.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('ok1.keys',ok1.keys)
if ok1.keys != None: # we had a response
thisExp.addData('ok1.rt', ok1.rt)
thisExp.nextEntry()
# the Routine "instrPractice" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
Blocks = data.TrialHandler(nReps=4, method='sequential',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=[None],
seed=None, name='Blocks')
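# 'Blocks' is a plain counter loop: trialList=[None] with nReps=4 and
# method='sequential' simply runs the block structure below four times, with no
# per-block parameters drawn from a conditions file.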
thisExp.addLoop(Blocks) # add the loop to the experiment
thisBlock = Blocks.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisBlock.rgb)
if thisBlock != None:
for paramName in thisBlock.keys():
exec(paramName + '= thisBlock.' + paramName)
for thisBlock in Blocks:
currentLoop = Blocks
# abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
if thisBlock != None:
for paramName in thisBlock.keys():
exec(paramName + '= thisBlock.' + paramName)
#------Prepare to start Routine "ResetAtBlock"-------
t = 0
ResetAtBlockClock.reset() # clock
frameN = -1
# update component parameters for each repeat
Redo = 1
# keep track of which components have finished
ResetAtBlockComponents = []
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "ResetAtBlock"-------
continueRoutine = True
while continueRoutine:
# get current time
t = ResetAtBlockClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "ResetAtBlock"-------
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "ResetAtBlock" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=25.0, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=data.importConditions('TrialtypesLearning.xlsx'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
#------Prepare to start Routine "NewStim"-------
t = 0
NewStimClock.reset() # clock
frameN = -1
# update component parameters for each repeat
currentLearningTrial = learningTrialList.pop(0)
currentStimulus = currentLearningTrial['fileName']
currentReward = currentLearningTrial['reward']
# keep track of which components have finished
NewStimComponents = []
for thisComponent in NewStimComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "NewStim"-------
continueRoutine = True
while continueRoutine:
# get current time
t = NewStimClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in NewStimComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "NewStim"-------
for thisComponent in NewStimComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "NewStim" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
ReDoLoopMain = data.TrialHandler(nReps=999, method='random',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=[None],
seed=None, name='ReDoLoopMain')
thisExp.addLoop(ReDoLoopMain) # add the loop to the experiment
thisReDoLoopMain = ReDoLoopMain.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisReDoLoopMain.rgb)
if thisReDoLoopMain != None:
for paramName in thisReDoLoopMain.keys():
exec(paramName + '= thisReDoLoopMain.' + paramName)
for thisReDoLoopMain in ReDoLoopMain:
currentLoop = ReDoLoopMain
# abbreviate parameter names if possible (e.g. rgb = thisReDoLoopMain.rgb)
if thisReDoLoopMain != None:
for paramName in thisReDoLoopMain.keys():
exec(paramName + '= thisReDoLoopMain.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
routineTimer.add(1.500000)
# update component parameters for each repeat
resp = event.BuilderKeyResponse() # create an object of type KeyResponse
resp.status = NOT_STARTED
image_2.setPos([xPos, yPos])
image_2.setImage(currentStimulus)
# keep track of which components have finished
trialComponents = []
trialComponents.append(resp)
trialComponents.append(text)
trialComponents.append(image_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *resp* updates
if t >= .5 and resp.status == NOT_STARTED:
# keep track of start time/frame for later
resp.tStart = t # underestimates by a little under one frame
resp.frameNStart = frameN # exact frame index
resp.status = STARTED
# keyboard checking is just starting
resp.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if resp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
resp.status = STOPPED
if resp.status == STARTED:
theseKeys = event.getKeys(keyList=['q', 'w', 's', 'a'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
resp.keys = theseKeys[-1] # just the last key pressed
resp.rt = resp.clock.getTime()
# was this 'correct'?
if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
resp.corr = 1
else:
resp.corr = 0
# a response ends the routine
continueRoutine = False
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *image_2* updates
if t >= .5 and image_2.status == NOT_STARTED:
# keep track of start time/frame for later
image_2.tStart = t # underestimates by a little under one frame
image_2.frameNStart = frameN # exact frame index
image_2.setAutoDraw(True)
if image_2.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if resp.keys in ['', [], None]: # No response was made
resp.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none': resp.corr = 1 # correct non-response
else: resp.corr = 0 # failed to respond (incorrectly)
# store data for ReDoLoopMain (TrialHandler)
ReDoLoopMain.addData('resp.keys',resp.keys)
ReDoLoopMain.addData('resp.corr', resp.corr)
if resp.keys != None: # we had a response
ReDoLoopMain.addData('resp.rt', resp.rt)
#------Prepare to start Routine "feedback"-------
t = 0
feedbackClock.reset() # clock
frameN = -1
routineTimer.add(1.000000)
# update component parameters for each repeat
import random
displayReward = random.randrange(1, 6)
computedReward = round(currentReward + (random.randrange(-25, 26)*.01), 2)
if resp.corr:#stored on last run routine
if displayReward == 1:
message = "You won $0.00"
else:
message = "You won $ %.2f" %computedReward
elif resp.keys is None: #or len(key_resp.keys)<1:
#elif resp.rt == 0:
message ="Too Slow"
else:
message="Wrong"
                msg="Correct! RT=%.3f" %(resp.rt)  # note: 'msg' appears unused (only 'message' is displayed); likely leftover from Builder's feedback demo
feedback_2.setText(message)
image.setPos([xPos, yPos])
image.setImage(currentStimulus)
# keep track of which components have finished
feedbackComponents = []
feedbackComponents.append(feedback_2)
feedbackComponents.append(image)
for thisComponent in feedbackComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "feedback"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedbackClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *feedback_2* updates
if t >= 0.0 and feedback_2.status == NOT_STARTED:
# keep track of start time/frame for later
feedback_2.tStart = t # underestimates by a little under one frame
feedback_2.frameNStart = frameN # exact frame index
feedback_2.setAutoDraw(True)
if feedback_2.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
feedback_2.setAutoDraw(False)
# *image* updates
if t >= 0.0 and image.status == NOT_STARTED:
# keep track of start time/frame for later
image.tStart = t # underestimates by a little under one frame
image.frameNStart = frameN # exact frame index
image.setAutoDraw(True)
if image.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "feedback"-------
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "Blank"-------
t = 0
BlankClock.reset() # clock
frameN = -1
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BlankComponents = []
BlankComponents.append(text_2)
for thisComponent in BlankComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "Blank"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = BlankClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if t >= 0 and text_2.status == NOT_STARTED:
# keep track of start time/frame for later
text_2.tStart = t # underestimates by a little under one frame
text_2.frameNStart = frameN # exact frame index
text_2.setAutoDraw(True)
if text_2.status == STARTED and t >= (0 + (.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in BlankComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "Blank"-------
for thisComponent in BlankComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "ExitMain"-------
t = 0
ExitMainClock.reset() # clock
frameN = -1
# update component parameters for each repeat
# keep track of which components have finished
ExitMainComponents = []
for thisComponent in ExitMainComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "ExitMain"-------
continueRoutine = True
while continueRoutine:
# get current time
t = ExitMainClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ExitMainComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "ExitMain"-------
for thisComponent in ExitMainComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if resp.corr:
ReDoLoopMain.finished = True
# the Routine "ExitMain" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# completed 999 repeats of 'ReDoLoopMain'
thisExp.nextEntry()
# completed 25.0 repeats of 'trials'
# get names of stimulus parameters
if trials.trialList in ([], [None], None): params = []
else: params = trials.trialList[0].keys()
# save data for this loop
trials.saveAsExcel(filename + '.xlsx', sheetName='trials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
thisExp.nextEntry()
# completed 4 repeats of 'Blocks'
# get names of stimulus parameters
if Blocks.trialList in ([], [None], None): params = []
else: params = Blocks.trialList[0].keys()
# save data for this loop
Blocks.saveAsExcel(filename + '.xlsx', sheetName='Blocks',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "instrStopPrac"-------
t = 0
instrStopPracClock.reset() # clock
frameN = -1
# update component parameters for each repeat
key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_2.status = NOT_STARTED
InitSSD = .25
SSDLeft = InitSSD
SSDRight = InitSSD
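# separate stop-signal delays (SSDs) are kept for left- and right-side go stimuli;
# both start at 250 ms and are adjusted by the staircase in the "SSDChange" routine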
# keep track of which components have finished
instrStopPracComponents = []
instrStopPracComponents.append(instrStopText)
instrStopPracComponents.append(key_resp_2)
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instrStopPrac"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrStopPracClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instrStopText* updates
if t >= 0.0 and instrStopText.status == NOT_STARTED:
# keep track of start time/frame for later
instrStopText.tStart = t # underestimates by a little under one frame
instrStopText.frameNStart = frameN # exact frame index
instrStopText.setAutoDraw(True)
# *key_resp_2* updates
if t >= 0.0 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t # underestimates by a little under one frame
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED:
theseKeys = event.getKeys()
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "instrStopPrac"-------
for thisComponent in instrStopPracComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "instrStopPrac" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
StopBlocks = data.TrialHandler(nReps=8, method='sequential',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=[None],
seed=None, name='StopBlocks')
thisExp.addLoop(StopBlocks) # add the loop to the experiment
thisStopBlock = StopBlocks.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisStopBlock.rgb)
if thisStopBlock != None:
for paramName in thisStopBlock.keys():
exec(paramName + '= thisStopBlock.' + paramName)
for thisStopBlock in StopBlocks:
currentLoop = StopBlocks
# abbreviate parameter names if possible (e.g. rgb = thisStopBlock.rgb)
if thisStopBlock != None:
for paramName in thisStopBlock.keys():
exec(paramName + '= thisStopBlock.' + paramName)
#------Prepare to start Routine "stopBlockSetup"-------
t = 0
stopBlockSetupClock.reset() # clock
frameN = -1
# update component parameters for each repeat
# keep track of which components have finished
stopBlockSetupComponents = []
for thisComponent in stopBlockSetupComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "stopBlockSetup"-------
continueRoutine = True
while continueRoutine:
# get current time
t = stopBlockSetupClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in stopBlockSetupComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "stopBlockSetup"-------
for thisComponent in stopBlockSetupComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "stopBlockSetup" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
StopTrials = data.TrialHandler(nReps=18, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/LearningStopAuction3.psyexp',
trialList=data.importConditions('trialtypeStop.xlsx'),
seed=None, name='StopTrials')
thisExp.addLoop(StopTrials) # add the loop to the experiment
thisStopTrial = StopTrials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisStopTrial.rgb)
if thisStopTrial != None:
for paramName in thisStopTrial.keys():
exec(paramName + '= thisStopTrial.' + paramName)
for thisStopTrial in StopTrials:
currentLoop = StopTrials
# abbreviate parameter names if possible (e.g. rgb = thisStopTrial.rgb)
if thisStopTrial != None:
for paramName in thisStopTrial.keys():
exec(paramName + '= thisStopTrial.' + paramName)
#------Prepare to start Routine "newStopStim"-------
t = 0
newStopStimClock.reset() # clock
frameN = -1
# update component parameters for each repeat
currentStopTrial = stopTrialList.pop(0)
currentGoStim = currentStopTrial['fileName']
currentStopOrGo = currentStopTrial['stopOrGo']
if currentStopOrGo == 'stop':
if xPosGoStim == -200:
SSD = deepcopy(SSDLeft)
else:
SSD = deepcopy(SSDRight)
elif currentStopOrGo == 'go':
SSD = -1
StopTrials.addData("beginningSSD", SSD)
StopTrials.addData("trialType", currentStopOrGo)
StopTrials.addData("goStim", currentGoStim)
SSDInput = SSD + .5
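# the stop tone is timed against routine onset: the go stimulus appears at t = .5 s,
# so SSDInput = SSD + .5 places the tone SSD seconds after stimulus onset; on go
# trials SSD is -1, so the tone is started and stopped again almost immediately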
# keep track of which components have finished
newStopStimComponents = []
for thisComponent in newStopStimComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "newStopStim"-------
continueRoutine = True
while continueRoutine:
# get current time
t = newStopStimClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in newStopStimComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "newStopStim"-------
for thisComponent in newStopStimComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "newStopStim" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "StopTrial"-------
t = 0
StopTrialClock.reset() # clock
frameN = -1
# update component parameters for each repeat
image_3.setPos([xPosGoStim, yPosGoStim])
image_3.setImage(currentGoStim)
goResp = event.BuilderKeyResponse() # create an object of type KeyResponse
goResp.status = NOT_STARTED
# keep track of which components have finished
StopTrialComponents = []
StopTrialComponents.append(sound_1)
StopTrialComponents.append(text_3)
StopTrialComponents.append(image_3)
StopTrialComponents.append(goResp)
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "StopTrial"-------
continueRoutine = True
while continueRoutine:
# get current time
t = StopTrialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# start/stop sound_1
if t >= SSDInput and sound_1.status == NOT_STARTED:
# keep track of start time/frame for later
sound_1.tStart = t # underestimates by a little under one frame
sound_1.frameNStart = frameN # exact frame index
sound_1.play() # start the sound (it finishes automatically)
if sound_1.status == STARTED and t >= (SSDInput + (.2-win.monitorFramePeriod*0.75)): #most of one frame period left
sound_1.stop() # stop the sound (if longer than duration)
# *text_3* updates
if t >= 0.0 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t # underestimates by a little under one frame
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
if text_3.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_3.setAutoDraw(False)
# *image_3* updates
if t >= .5 and image_3.status == NOT_STARTED:
# keep track of start time/frame for later
image_3.tStart = t # underestimates by a little under one frame
image_3.frameNStart = frameN # exact frame index
image_3.setAutoDraw(True)
if image_3.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image_3.setAutoDraw(False)
# *goResp* updates
if t >= .5 and goResp.status == NOT_STARTED:
# keep track of start time/frame for later
goResp.tStart = t # underestimates by a little under one frame
goResp.frameNStart = frameN # exact frame index
goResp.status = STARTED
# keyboard checking is just starting
goResp.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if goResp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
goResp.status = STOPPED
if goResp.status == STARTED:
theseKeys = event.getKeys(keyList=['z', 'm'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
if goResp.keys == []: # then this was the first keypress
goResp.keys = theseKeys[0] # just the first key pressed
goResp.rt = goResp.clock.getTime()
# was this 'correct'?
if (goResp.keys == str(corrGoResp)) or (goResp.keys == corrGoResp):
goResp.corr = 1
else:
goResp.corr = 0
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "StopTrial"-------
for thisComponent in StopTrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
sound_1.stop() #ensure sound has stopped at end of routine
# check responses
if goResp.keys in ['', [], None]: # No response was made
goResp.keys=None
# was no response the correct answer?!
if str(corrGoResp).lower() == 'none': goResp.corr = 1 # correct non-response
else: goResp.corr = 0 # failed to respond (incorrectly)
# store data for StopTrials (TrialHandler)
StopTrials.addData('goResp.keys',goResp.keys)
StopTrials.addData('goResp.corr', goResp.corr)
if goResp.keys != None: # we had a response
StopTrials.addData('goResp.rt', goResp.rt)
# the Routine "StopTrial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "blankStop"-------
t = 0
blankStopClock.reset() # clock
frameN = -1
routineTimer.add(1.500000)
# update component parameters for each repeat
# keep track of which components have finished
blankStopComponents = []
blankStopComponents.append(text_4)
for thisComponent in blankStopComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "blankStop"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = blankStopClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_4* updates
if t >= 0.0 and text_4.status == NOT_STARTED:
# keep track of start time/frame for later
text_4.tStart = t # underestimates by a little under one frame
text_4.frameNStart = frameN # exact frame index
text_4.setAutoDraw(True)
if text_4.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_4.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in blankStopComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "blankStop"-------
for thisComponent in blankStopComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
#------Prepare to start Routine "SSDChange"-------
t = 0
SSDChangeClock.reset() # clock
frameN = -1
# update component parameters for each repeat
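# 1-up/1-down staircase for the stop-signal delay: raise the SSD of the tested side
# by 50 ms after a successful stop, lower it by 50 ms (not below 0) after a failed
# stop, which converges on roughly 50% successful stopping per side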
if currentStopTrial['stopOrGo'] == 'stop':
if goResp.keys is None:
        if SSD <= .8:  # SSD is tracked in seconds; cap the staircase at 800 ms
if xPosGoStim == -200:
SSDLeft = deepcopy(SSDLeft) + .05
SSD = SSDLeft
if xPosGoStim == 200:
SSDRight = deepcopy(SSDRight) + .05
SSD = SSDRight
else:
if SSD > 0:
if xPosGoStim == -200:
SSDLeft = deepcopy(SSDLeft) - .05
SSD = SSDLeft
if xPosGoStim == 200:
SSDRight = deepcopy(SSDRight) - .05
SSD = SSDRight
if currentStopTrial['stopOrGo'] == 'go':
goTrialCount = goTrialCount + 1
if goResp.corr and currentStopTrial['stopOrGo'] == 'go':
goCumRT = goCumRT + goResp.rt
goRTCount = goRTCount + 1
if currentStopTrial['stopOrGo'] == 'stop':
stopTrialCount = stopTrialCount + 1
if currentStopTrial['stopOrGo'] == 'stop' and goResp.keys is None:
stopSuccessCount = stopSuccessCount + 1
if currentStopTrial['stopOrGo'] == 'go':
if goResp.keys is None:
omissionCount = omissionCount + 1
elif goResp.corr == 0:
commissionCount = commissionCount + 1
StopTrials.addData("goTrailCountOutput", goTrialCount)
StopTrials.addData("goCumRTOutput", goCumRT)
StopTrials.addData("goRTCountOutput", goRTCount)
StopTrials.addData("stopTrialCountOutput", stopTrialCount)
StopTrials.addData("stopSuccessCountOutput", stopSuccessCount)
StopTrials.addData("omissionCountOutput", omissionCount)
StopTrials.addData("commissionCountOutput", commissionCount)
# keep track of which components have finished
SSDChangeComponents = []
for thisComponent in SSDChangeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "SSDChange"-------
continueRoutine = True
while continueRoutine:
# get current time
t = SSDChangeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in SSDChangeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "SSDChange"-------
for thisComponent in SSDChangeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
StopTrials.addData("EndingSSD", SSD)
# the Routine "SSDChange" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 18 repeats of 'StopTrials'
# get names of stimulus parameters
if StopTrials.trialList in ([], [None], None): params = []
else: params = StopTrials.trialList[0].keys()
# save data for this loop
StopTrials.saveAsExcel(filename + '.xlsx', sheetName='StopTrials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "endOfStopBlockFeedback"-------
t = 0
endOfStopBlockFeedbackClock.reset() # clock
frameN = -1
# update component parameters for each repeat
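# end-of-block feedback: mean RT on correct go trials, omission and commission rates
# on go trials, the proportion of successful stops, and the mean of the two SSDs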
if goRTCount > 0:
goRTFeedback = goCumRT/goRTCount
goRTFeedback = round(goRTFeedback, 2)
else:
goRTFeedback = 'Null'
if goTrialCount > 0:
    # use float division so the rates are not truncated to zero under Python 2
    commissionRate = float(commissionCount)/goTrialCount
    commissionRate = round(commissionRate, 2)
    omissionRate = float(omissionCount)/goTrialCount
    omissionRate = round(omissionRate, 2)
else:
    commissionRate = 'Null'
    omissionRate = 'Null'
if stopTrialCount > 0:
    probabilityOfStop = float(stopSuccessCount)/stopTrialCount
    probabilityOfStop = round(probabilityOfStop, 2)
else:
    probabilityOfStop = 'Null'
SSDFeedback = (SSDLeft+SSDRight)/2
SSDFeedback = round(SSDFeedback, 2)
stopMessage = " RT = " + str(goRTFeedback) + "\n Omission % = " + str(omissionRate) + "\n Commission % = " + str(commissionRate) + "\n\n\n\n " + str(probabilityOfStop) + "\n " + str(SSDFeedback)
text_5.setText(stopMessage)
key_resp_3 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_3.status = NOT_STARTED
# keep track of which components have finished
endOfStopBlockFeedbackComponents = []
endOfStopBlockFeedbackComponents.append(text_5)
endOfStopBlockFeedbackComponents.append(key_resp_3)
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "endOfStopBlockFeedback"-------
continueRoutine = True
while continueRoutine:
# get current time
t = endOfStopBlockFeedbackClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_5* updates
if t >= 0.0 and text_5.status == NOT_STARTED:
# keep track of start time/frame for later
text_5.tStart = t # underestimates by a little under one frame
text_5.frameNStart = frameN # exact frame index
text_5.setAutoDraw(True)
# *key_resp_3* updates
if t >= 0.0 and key_resp_3.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_3.tStart = t # underestimates by a little under one frame
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.status = STARTED
# keyboard checking is just starting
key_resp_3.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_3.status == STARTED:
theseKeys = event.getKeys(keyList=['y', 'n', 'left', 'right', 'space'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_3.keys = theseKeys[-1] # just the last key pressed
key_resp_3.rt = key_resp_3.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "endOfStopBlockFeedback"-------
for thisComponent in endOfStopBlockFeedbackComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
# store data for StopBlocks (TrialHandler)
StopBlocks.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
StopBlocks.addData('key_resp_3.rt', key_resp_3.rt)
# the Routine "endOfStopBlockFeedback" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 8 repeats of 'StopBlocks'
# get names of stimulus parameters
if StopBlocks.trialList in ([], [None], None): params = []
else: params = StopBlocks.trialList[0].keys()
# save data for this loop
StopBlocks.saveAsExcel(filename + '.xlsx', sheetName='StopBlocks',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "End"-------
t = 0
EndClock.reset() # clock
frameN = -1
routineTimer.add(5.000000)
# update component parameters for each repeat
# keep track of which components have finished
EndComponents = []
EndComponents.append(text_6)
for thisComponent in EndComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "End"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = EndClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_6* updates
if t >= 0.0 and text_6.status == NOT_STARTED:
# keep track of start time/frame for later
text_6.tStart = t # underestimates by a little under one frame
text_6.frameNStart = frameN # exact frame index
text_6.setAutoDraw(True)
if text_6.status == STARTED and t >= (0.0 + (5-win.monitorFramePeriod*0.75)): #most of one frame period left
text_6.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in EndComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "End"-------
for thisComponent in EndComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
win.close()
core.quit()
| [
"[email protected]"
] | |
d6c2bad547afdcc4851a6ed9eed95609d1640240 | 2043a3bbe2c2ac4fcf293a578b4c82018871290b | /495-提莫攻击/495.py | 61a9feaa9420b9b22f46819187a87d008b5eb0e4 | [] | no_license | agave233/leetcode | 7f3804a5ec544a9fb1a6ae34a91886d283075e2a | 57629c9b0f9a7539bed9f28ba6771ee9d13aafaa | refs/heads/master | 2021-09-13T21:42:55.535234 | 2018-05-04T15:57:24 | 2018-05-04T15:57:24 | 124,670,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Solution(object):
def findPoisonedDuration(self, timeSeries, duration):
"""
:type timeSeries: List[int]
:type duration: int
:rtype: int
"""
if timeSeries == []:
return 0
res = 0
for i in range(1, len(timeSeries)):
res += min(duration, timeSeries[i] - timeSeries[i - 1])
return res + duration
| [
"[email protected]"
] | |
8ed01875c063dd4231bf7ef743d43b3956f30e37 | c1cd6a7a446934c428bc4fbf988f8d6680460488 | /dist/restclient.app/Contents/Resources/wx/lib/analogclock/__init__.py | dd4f8fbf2da9a0565f3d6ba9a9fb0217e29fc3aa | [] | no_license | devvmh/restclient-py2app | ed016d1763ee99779388c8700dfb9c129cf8ce1a | 6826f6cb81c08a36b30878683a58e4f7a18f5041 | refs/heads/master | 2021-01-10T12:01:31.411373 | 2016-01-18T03:34:02 | 2016-01-18T03:34:02 | 49,850,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | /Users/devin/git/restclient/venv/lib/python2.7/site-packages/wx/lib/analogclock/__init__.py | [
"[email protected]"
] | |
7cd3431aa501d19d14be6f45d4b21f1e5eda4263 | 12317e3617b1bd900d131c2047ec2e3211bcb16b | /musictrack-albumadd.py | 3cc9dec9f6c567bfb41ce67dad1f7e0dd357bf09 | [] | no_license | apocalyptech/musictrack-cli | b5fd7dedb4c6bb23ceea8e9e717ad2062a087a15 | 6563f9177df7f0a28039ffeabae005d940bdb205 | refs/heads/master | 2021-08-12T05:53:44.831965 | 2021-08-09T15:58:35 | 2021-08-09T15:58:35 | 72,576,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python3
# vim: set expandtab tabstop=4 shiftwidth=4:
# Adds a new album to the database
from app import App, AppArgumentParser
# Parse arguments
parser = AppArgumentParser(description='Adds a new album to the database')
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--live',
action='store_true',
help='Store as a live album')
group.add_argument('-e', '--ep',
action='store_true',
help='Store as an EP')
parser.add_argument('-f', '--force',
action='store_true',
help='Force an update, if the album already exists')
parser.add_argument('filenames',
type=str,
nargs='+',
metavar='filename',
help='Filenames which make up the album')
args = parser.parse_args()
# Collapse our album type down a bit
if args.live:
album_type = 'live'
elif args.ep:
album_type = 'ep'
else:
album_type = 'album'
# Do the work
app = App(args.database)
(added, status) = app.add_album(args.filenames, album_type, force_update=args.force)
print(status)
app.close()
| [
"[email protected]"
] | |
760804ea8af855cfc7abba00fb01151a90c8e877 | 0e820627e68413aebe27fbc32dde66a3c99651d3 | /flamingo/core/context.py | 3a16f23a65abc8322d5d15a2a9a1734cc8c6f8c1 | [
"Apache-2.0"
] | permissive | ejoerns/flamingo | 17de116008561a4a64613b2b6528eb6d52706281 | 1d61c99c9ad34dd0a2a652f80783226051e07238 | refs/heads/master | 2020-04-11T02:53:47.295752 | 2018-12-14T11:17:23 | 2018-12-14T11:37:39 | 161,460,547 | 0 | 0 | Apache-2.0 | 2018-12-12T09:00:34 | 2018-12-12T09:00:34 | null | UTF-8 | Python | false | false | 7,368 | py | import logging
import shutil
import os
from flamingo.core.parser import FileParser, ParsingError
from flamingo.core.data_model import ContentSet, Content
from flamingo.core.utils.imports import acquire
class Context:
def __init__(self, settings):
self.settings = settings
# setup logging
self.logger = logging.getLogger('flamingo')
self.logger.debug('setting up context')
# setup plugins
self.plugins = []
plugins = (self.settings.CORE_PLUGINS +
self.settings.DEFAULT_PLUGINS +
self.settings.PLUGINS)
for plugin in plugins:
self.logger.debug("setting up plugin '%s' ", plugin)
try:
plugin_class = acquire(plugin)
self.plugins.append(plugin_class())
except Exception:
self.logger.error('plugin setup failed', exc_info=True)
# setup parser
self.parser = FileParser()
self.run_plugin_hook('parser_setup')
# parse contents
self.contents = ContentSet()
self.content = None
self._media = [] # FIXME: this should be part of Content()
for path in self.get_source_paths():
self.logger.debug("reading %s ", path)
try:
self.content = Content(
path=os.path.relpath(path, settings.CONTENT_ROOT))
self.parser.parse(path, self.content)
self.run_plugin_hook('content_parsed', self.content)
self.contents.add(self.content)
except ParsingError as e:
self.logger.error('%s: %s', path, e)
except Exception:
self.logger.error('exception occoured while reading %s',
path, exc_info=True)
del self.content
self.run_plugin_hook('contents_parsed')
# setup templating engine
templating_engine_class = acquire(settings.TEMPLATING_ENGINE)
self.templating_engine = templating_engine_class(
settings.THEME_PATHS + settings.CORE_THEME_PATHS
)
self.run_plugin_hook('templating_engine_setup', self.templating_engine)
self.run_plugin_hook('context_setup')
def get_source_paths(self):
self.logger.debug('searching for content')
supported_extensions = self.parser.get_extensions()
if self.settings.CONTENT_PATHS:
self.logger.debug('using user defined content paths')
for path in self.settings.CONTENT_PATHS:
path = os.path.join(self.settings.CONTENT_ROOT, path)
extension = os.path.splitext(path)[1][1:]
if extension not in supported_extensions:
self.logger.debug(
"skipping '%s'. extension '%s' is not supported",
path, extension)
continue
yield path
else:
self.logger.debug("searching content recursive in %s",
self.settings.CONTENT_ROOT)
for root, dirs, files in os.walk(self.settings.CONTENT_ROOT):
for name in files:
extension = os.path.splitext(name)[1][1:]
if extension not in supported_extensions:
continue
yield os.path.join(root, name)
def run_plugin_hook(self, name, *args, **kwargs):
self.logger.debug("running plugin hook '%s'", name)
for plugin in self.plugins:
hook = getattr(plugin, name, None)
if not hook:
continue
self.logger.debug('running %s.%s', plugin.__class__.__name__, name)
hook(self, *args, **kwargs)
def render(self, content, template_name=''):
template_name = template_name or content['template']
template_context = {
'content': content,
'context': self,
}
return self.templating_engine.render(template_name, template_context)
def copy_media(self, filename, content_source_path):
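        """
        Resolve a media reference to (source_path, destination_path, link).
        Paths starting with '/' are taken relative to CONTENT_ROOT; all other
        paths are resolved relative to the content file that references them.
        Existing files are also queued for copying during build().
        """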
# gen source_path
if filename.startswith('/'):
source_path = os.path.join(
self.settings.CONTENT_ROOT, filename[1:])
else:
source_path = os.path.join(
os.path.dirname(
os.path.join(self.settings.CONTENT_ROOT,
content_source_path)
),
filename,
)
source_path = os.path.normpath(source_path)
# gen destination_path
destination_path = os.path.join(
self.settings.MEDIA_ROOT,
os.path.relpath(source_path, self.settings.CONTENT_ROOT),
)
# gen link
link = os.path.join(
'/media',
os.path.relpath(destination_path, self.settings.MEDIA_ROOT),
)
# check if media exists
if not os.path.exists(source_path):
self.logger.critical(
"media '%s' does not exist (used as '%s' in '%s')",
source_path, filename, content_source_path,
)
else:
self._media.append((source_path, destination_path, ))
return source_path, destination_path, link
def build(self, clean=True):
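        """
        Render all parsed contents into OUTPUT_ROOT and, unless CONTENT_PATHS
        limits the run to selected files, copy referenced media files and the
        themes' static files as well.
        """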
self.run_plugin_hook('pre_build')
def makedirs(path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
self.logger.debug('mkdir -p %s', dirname)
os.makedirs(dirname)
# remove previous artifacts
if clean and os.path.exists(self.settings.OUTPUT_ROOT):
self.logger.debug('rm -rf %s', self.settings.OUTPUT_ROOT)
shutil.rmtree(self.settings.OUTPUT_ROOT)
# render contents
for content in self.contents:
output_path = os.path.join(self.settings.OUTPUT_ROOT,
content['output'])
makedirs(output_path)
# render and write content
with open(output_path, 'w+') as f:
self.logger.debug("writing '%s'...", output_path)
if content['template']:
output = self.render(content)
else:
output = content['content']
f.write(output)
if self.settings.CONTENT_PATHS:
return
# copy media
for source_path, destination_path in self._media:
makedirs(destination_path)
self.logger.debug('cp %s %s', source_path, destination_path)
shutil.copy(source_path, destination_path)
# copy static
for static_dir in self.templating_engine.find_static_dirs():
for root, dirs, files in os.walk(static_dir):
for f in files:
src = os.path.join(root, f)
dst = os.path.join(
self.settings.STATIC_ROOT,
os.path.relpath(root, static_dir),
f,
)
self.logger.debug('cp %s %s', src, dst)
makedirs(dst)
shutil.copy(src, dst)
self.run_plugin_hook('post_build')
| [
"[email protected]"
] | |
f79ed3b7e1c14de99d256cd73c36799358a75bf8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_enthroning.py | 9e1ce436218ffae4fce5553d7c0bc87d31485be5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
#calss header
class _ENTHRONING():
def __init__(self,):
self.name = "ENTHRONING"
self.definitions = enthrone
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['enthrone']
| [
"[email protected]"
] | |
f4d1328cdd7e3637702bea1c4b7253ed5dfb96ad | 206330d8ce5266630f4500be8df90e59d72addc2 | /xcube/api/chunk.py | 0262939968fce4992e3e91ed0596b24ac8e8afd3 | [
"MIT"
] | permissive | dzelge/xcube | 4c70dda7e1efc4517e558af7ac9a2d1ae440b782 | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | refs/heads/master | 2020-06-17T15:56:54.187694 | 2019-07-08T07:39:08 | 2019-07-08T07:39:08 | 195,969,440 | 0 | 0 | MIT | 2019-07-09T08:46:03 | 2019-07-09T08:46:03 | null | UTF-8 | Python | false | false | 3,171 | py | import itertools
from typing import Dict, Tuple, Iterable
import numpy as np
import xarray as xr
from xcube.util.dsio import FORMAT_NAME_ZARR, FORMAT_NAME_NETCDF4
def chunk_dataset(dataset: xr.Dataset,
chunk_sizes: Dict[str, int] = None,
format_name: str = None) -> xr.Dataset:
"""
Chunk dataset and update encodings for given format.
:param dataset: input dataset
:param chunk_sizes: mapping from dimension name to new chunk size
:param format_name: format, e.g. "zarr" or "netcdf4"
:return: the re-chunked dataset
"""
chunked_ds = dataset.chunk(chunks=chunk_sizes)
# Update encoding so writing of chunked_ds recognizes new chunks
chunk_sizes_attr_name = None
if format_name == FORMAT_NAME_ZARR:
chunk_sizes_attr_name = "chunks"
if format_name == FORMAT_NAME_NETCDF4:
chunk_sizes_attr_name = "chunksizes"
if chunk_sizes_attr_name:
for var_name in chunked_ds.variables:
var = chunked_ds[var_name]
if chunk_sizes:
sizes = tuple(chunk_sizes[dim_name] if dim_name in chunk_sizes
else var.shape[var.dims.index(dim_name)]
for dim_name in var.dims)
var.encoding.update({chunk_sizes_attr_name: sizes})
elif chunk_sizes_attr_name in var.encoding:
# Remove any explicit and wrong specification so writing will use Dask chunks (TBC!)
del var.encoding[chunk_sizes_attr_name]
return chunked_ds
def get_empty_dataset_chunks(dataset: xr.Dataset) -> Dict[str, Tuple[Tuple[int, ...]]]:
"""
Identify empty dataset chunks and return their indices.
:param dataset: The dataset.
:return: A mapping from variable name to a list of block indices.
"""
return {var_name: get_empty_var_chunks(dataset[var_name]) for var_name in dataset.data_vars}
def get_empty_var_chunks(var: xr.DataArray) -> Tuple[Tuple[int, ...]]:
"""
Identify empty variable chunks and return their indices.
:param var: The variable.
:return: A list of block indices.
"""
chunks = var.chunks
if chunks is None:
raise ValueError('data array not chunked')
chunk_slices = compute_chunk_slices(chunks)
empty_chunk_indexes = []
for chunk_index, chunk_slice in chunk_slices:
data_index = tuple(slice(start, end) for start, end in chunk_slice)
data = var[data_index]
if np.all(np.isnan(data)):
empty_chunk_indexes.append(chunk_index)
# print(f'empty: {var.name}/{".".join(map(str, chunk_index))}')
# noinspection PyTypeChecker
return tuple(empty_chunk_indexes)
def compute_chunk_slices(chunks: Tuple[Tuple[int, ...], ...]) -> Iterable:
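    """
    Return an iterator of (chunk_index, chunk_slice) pairs covering every chunk,
    where chunk_index holds the per-dimension block indices and chunk_slice the
    corresponding (start, stop) offsets along each dimension.
    """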
chunk_indices = []
for c in chunks:
chunk_indices.append(tuple(i for i in range(len(c))))
chunk_slices = []
for c in chunks:
x = []
o = 0
for s in c:
x.append((o, o + s))
o += s
chunk_slices.append(tuple(x))
return zip(itertools.product(*chunk_indices), itertools.product(*chunk_slices))
| [
"[email protected]"
] | |
3ac5574809b0a987b095ff860930ce7889a6010d | 3b9338d99cf8090387418e32ca81617f072c39fb | /build_system_kit/extpy/runme.py | 9984d9c89d00a26c95c9f157fb45f863659100e9 | [] | no_license | sillsdevarchive/wsiwaf | 8ca14c286bafceb9ee6fad740b64ad7131282dc3 | 2dcddafc3602a7220acbe995df4ba85abb06b767 | refs/heads/master | 2020-12-30T17:10:21.701380 | 2017-05-12T05:12:17 | 2017-05-12T05:12:17 | 91,052,898 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #! /usr/bin/env python
# encoding: utf-8
"""
Create a waf file able to read wscript files ending in ".py"
execute a small test to show that it works
The waf file includes "extpy.py" which performs the required modifications
"""
import os, subprocess
up = os.path.dirname
join = os.path.join
cwd = os.getcwd()
extpy = join(cwd, 'extpy.py')
args = 'python waf-light --tools=compat15,%s --prelude=$"\tfrom waflib.extras import extpy\n" ' % extpy
root = up(up(cwd))
subprocess.Popen(args, cwd=root, shell=True).wait()
os.rename(join(root, 'waf'), join(cwd, 'waf.py'))
env = dict(os.environ)
if 'WAFDIR' in env:
del env['WAFDIR']
subprocess.Popen('python waf.py configure', cwd=cwd, shell=True, env=env).wait()
| [
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] | tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85 |
6370647795edf476bd74936a53934d7af363aa50 | a6ed990fa4326c625a2a02f0c02eedf758ad8c7b | /meraki/sdk/python/getOrganizationSamlRole.py | 59bfeed4271e31d35e33f75e08f64889f85458c1 | [] | no_license | StevenKitavi/Meraki-Dashboard-API-v1-Documentation | cf2352976c6b6c00c17a5f6442cedf0aeed46c22 | 5ed02a7def29a2ce455a3f2cfa185f76f44789f5 | refs/heads/main | 2023-03-02T08:49:34.846055 | 2021-02-05T10:31:25 | 2021-02-05T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
organization_id = '549236'
saml_role_id = ''
response = dashboard.organizations.getOrganizationSamlRole(
organization_id, saml_role_id
)
print(response) | [
"[email protected]"
] | |
af55da00f419a54f60e6f9d444592cf6fc9dfe8a | 09301c71638abf45230192e62503f79a52e0bd80 | /besco_erp/besco_sale/general_sale_margin/report/__init__.py | 2b722f4eaeb7ee64f222222d8f751f72bc36c203 | [] | no_license | westlyou/NEDCOFFEE | 24ef8c46f74a129059622f126401366497ba72a6 | 4079ab7312428c0eb12015e543605eac0bd3976f | refs/heads/master | 2020-05-27T06:01:15.188827 | 2017-11-14T15:35:22 | 2017-11-14T15:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # -*- coding: utf-8 -*-
##############################################################################
#
#
##############################################################################
import sales_fiscalyear
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
dd080f3fb34b5813c336f55367f9d6d793eaf669 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/apimanagement/v20190101/get_api_version_set.py | c4fb809e041874e3b5c0a04404aedfa32889fdd7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,626 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiVersionSetResult',
'AwaitableGetApiVersionSetResult',
'get_api_version_set',
]
@pulumi.output_type
class GetApiVersionSetResult:
"""
Api Version Set Contract details.
"""
def __init__(__self__, description=None, display_name=None, name=None, type=None, version_header_name=None, version_query_name=None, versioning_scheme=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version_header_name and not isinstance(version_header_name, str):
raise TypeError("Expected argument 'version_header_name' to be a str")
pulumi.set(__self__, "version_header_name", version_header_name)
if version_query_name and not isinstance(version_query_name, str):
raise TypeError("Expected argument 'version_query_name' to be a str")
pulumi.set(__self__, "version_query_name", version_query_name)
if versioning_scheme and not isinstance(versioning_scheme, str):
raise TypeError("Expected argument 'versioning_scheme' to be a str")
pulumi.set(__self__, "versioning_scheme", versioning_scheme)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of API Version Set.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Name of API Version Set
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="versionHeaderName")
def version_header_name(self) -> Optional[str]:
"""
Name of HTTP header parameter that indicates the API Version if versioningScheme is set to `header`.
"""
return pulumi.get(self, "version_header_name")
@property
@pulumi.getter(name="versionQueryName")
def version_query_name(self) -> Optional[str]:
"""
Name of query parameter that indicates the API Version if versioningScheme is set to `query`.
"""
return pulumi.get(self, "version_query_name")
@property
@pulumi.getter(name="versioningScheme")
def versioning_scheme(self) -> str:
"""
An value that determines where the API Version identifer will be located in a HTTP request.
"""
return pulumi.get(self, "versioning_scheme")
class AwaitableGetApiVersionSetResult(GetApiVersionSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiVersionSetResult(
description=self.description,
display_name=self.display_name,
name=self.name,
type=self.type,
version_header_name=self.version_header_name,
version_query_name=self.version_query_name,
versioning_scheme=self.versioning_scheme)
def get_api_version_set(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
version_set_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiVersionSetResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str version_set_id: Api Version Set identifier. Must be unique in the current API Management service instance.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['versionSetId'] = version_set_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20190101:getApiVersionSet', __args__, opts=opts, typ=GetApiVersionSetResult).value
return AwaitableGetApiVersionSetResult(
description=__ret__.description,
display_name=__ret__.display_name,
name=__ret__.name,
type=__ret__.type,
version_header_name=__ret__.version_header_name,
version_query_name=__ret__.version_query_name,
versioning_scheme=__ret__.versioning_scheme)
| [
"[email protected]"
] | |
ba1c31019cb0473c8d9a0fc745dd4ab39cc21871 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/ge_o_ip_item.py | 72a31edbef1b0539fc0a31b68dbbd71f194d46b5 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 10,269 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class GeOIpItem:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'policyid': 'str',
'name': 'str',
'geoip': 'str',
'white': 'int',
'status': 'int',
'timestamp': 'int'
}
attribute_map = {
'id': 'id',
'policyid': 'policyid',
'name': 'name',
'geoip': 'geoip',
'white': 'white',
'status': 'status',
'timestamp': 'timestamp'
}
def __init__(self, id=None, policyid=None, name=None, geoip=None, white=None, status=None, timestamp=None):
"""GeOIpItem
The model defined in huaweicloud sdk
        :param id: Rule ID
        :type id: str
        :param policyid: Policy ID
        :type policyid: str
        :param name: Name of the geolocation access control rule
        :type name: str
        :param geoip: Geographic locations to block: (CN: China, CA: Canada, US: United States, AU: Australia, IN: India, JP: Japan, UK: United Kingdom, FR: France, DE: Germany, BR: Brazil, Ukraine: Ukraine, North Korea: North Korea, The Republic of Korea: South Korea, Iran: Iran, Cuba: Cuba, Sultan: Sudan, Syria: Syria, Pakistan: Pakistan, Palestine: Palestine, Israel: Israel, Iraq: Iraq, Afghanistan: Afghanistan, Libya: Libya, Turkey: Turkey, Thailand: Thailand, Singapore: Singapore, South Africa: South Africa, Mexico: Mexico, Peru: Peru, Indonesia: Indonesia, GD: Guangdong, FJ: Fujian, JL: Jilin, LN: Liaoning, TW: Taiwan, GZ: Guizhou, AH: Anhui, HL: Heilongjiang, HA: Henan, SC: Sichuan, HE: Hebei, YN: Yunnan, HB: Hubei, HI: Hainan, QH: Qinghai, HN: Hunan, JX: Jiangxi, SX: Shanxi, SN: Shaanxi, ZJ: Zhejiang, GS: Gansu, JS: Jiangsu, SD: Shandong, BJ: Beijing, SH: Shanghai, TJ: Tianjin, CQ: Chongqing, MO: Macao, HK: Hong Kong, NX: Ningxia, GX: Guangxi, XJ: Xinjiang, XZ: Tibet, NM: Inner Mongolia)
        :type geoip: str
        :param white: Protective action: - 0 block - 1 allow - 2 log only
        :type white: int
        :param status: Rule status, 0: disabled, 1: enabled
        :type status: int
        :param timestamp: Timestamp when the rule was created
:type timestamp: int
"""
self._id = None
self._policyid = None
self._name = None
self._geoip = None
self._white = None
self._status = None
self._timestamp = None
self.discriminator = None
if id is not None:
self.id = id
if policyid is not None:
self.policyid = policyid
if name is not None:
self.name = name
if geoip is not None:
self.geoip = geoip
if white is not None:
self.white = white
if status is not None:
self.status = status
if timestamp is not None:
self.timestamp = timestamp
@property
def id(self):
"""Gets the id of this GeOIpItem.
        Rule ID
:return: The id of this GeOIpItem.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this GeOIpItem.
        Rule ID
:param id: The id of this GeOIpItem.
:type id: str
"""
self._id = id
@property
def policyid(self):
"""Gets the policyid of this GeOIpItem.
        Policy ID
:return: The policyid of this GeOIpItem.
:rtype: str
"""
return self._policyid
@policyid.setter
def policyid(self, policyid):
"""Sets the policyid of this GeOIpItem.
        Policy ID
:param policyid: The policyid of this GeOIpItem.
:type policyid: str
"""
self._policyid = policyid
@property
def name(self):
"""Gets the name of this GeOIpItem.
        Name of the geolocation access control rule
:return: The name of this GeOIpItem.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GeOIpItem.
        Name of the geolocation access control rule
:param name: The name of this GeOIpItem.
:type name: str
"""
self._name = name
@property
def geoip(self):
"""Gets the geoip of this GeOIpItem.
        Geographic locations to block: (CN: China, CA: Canada, US: United States, AU: Australia, IN: India, JP: Japan, UK: United Kingdom, FR: France, DE: Germany, BR: Brazil, Ukraine: Ukraine, North Korea: North Korea, The Republic of Korea: South Korea, Iran: Iran, Cuba: Cuba, Sultan: Sudan, Syria: Syria, Pakistan: Pakistan, Palestine: Palestine, Israel: Israel, Iraq: Iraq, Afghanistan: Afghanistan, Libya: Libya, Turkey: Turkey, Thailand: Thailand, Singapore: Singapore, South Africa: South Africa, Mexico: Mexico, Peru: Peru, Indonesia: Indonesia, GD: Guangdong, FJ: Fujian, JL: Jilin, LN: Liaoning, TW: Taiwan, GZ: Guizhou, AH: Anhui, HL: Heilongjiang, HA: Henan, SC: Sichuan, HE: Hebei, YN: Yunnan, HB: Hubei, HI: Hainan, QH: Qinghai, HN: Hunan, JX: Jiangxi, SX: Shanxi, SN: Shaanxi, ZJ: Zhejiang, GS: Gansu, JS: Jiangsu, SD: Shandong, BJ: Beijing, SH: Shanghai, TJ: Tianjin, CQ: Chongqing, MO: Macao, HK: Hong Kong, NX: Ningxia, GX: Guangxi, XJ: Xinjiang, XZ: Tibet, NM: Inner Mongolia)
:return: The geoip of this GeOIpItem.
:rtype: str
"""
return self._geoip
@geoip.setter
def geoip(self, geoip):
"""Sets the geoip of this GeOIpItem.
        Geographic locations to block: (CN: China, CA: Canada, US: United States, AU: Australia, IN: India, JP: Japan, UK: United Kingdom, FR: France, DE: Germany, BR: Brazil, Ukraine: Ukraine, North Korea: North Korea, The Republic of Korea: South Korea, Iran: Iran, Cuba: Cuba, Sultan: Sudan, Syria: Syria, Pakistan: Pakistan, Palestine: Palestine, Israel: Israel, Iraq: Iraq, Afghanistan: Afghanistan, Libya: Libya, Turkey: Turkey, Thailand: Thailand, Singapore: Singapore, South Africa: South Africa, Mexico: Mexico, Peru: Peru, Indonesia: Indonesia, GD: Guangdong, FJ: Fujian, JL: Jilin, LN: Liaoning, TW: Taiwan, GZ: Guizhou, AH: Anhui, HL: Heilongjiang, HA: Henan, SC: Sichuan, HE: Hebei, YN: Yunnan, HB: Hubei, HI: Hainan, QH: Qinghai, HN: Hunan, JX: Jiangxi, SX: Shanxi, SN: Shaanxi, ZJ: Zhejiang, GS: Gansu, JS: Jiangsu, SD: Shandong, BJ: Beijing, SH: Shanghai, TJ: Tianjin, CQ: Chongqing, MO: Macao, HK: Hong Kong, NX: Ningxia, GX: Guangxi, XJ: Xinjiang, XZ: Tibet, NM: Inner Mongolia)
:param geoip: The geoip of this GeOIpItem.
:type geoip: str
"""
self._geoip = geoip
@property
def white(self):
"""Gets the white of this GeOIpItem.
        Protective action: - 0 block - 1 allow - 2 log only
:return: The white of this GeOIpItem.
:rtype: int
"""
return self._white
@white.setter
def white(self, white):
"""Sets the white of this GeOIpItem.
        Protective action: - 0 block - 1 allow - 2 log only
:param white: The white of this GeOIpItem.
:type white: int
"""
self._white = white
@property
def status(self):
"""Gets the status of this GeOIpItem.
        Rule status, 0: disabled, 1: enabled
:return: The status of this GeOIpItem.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this GeOIpItem.
        Rule status, 0: disabled, 1: enabled
:param status: The status of this GeOIpItem.
:type status: int
"""
self._status = status
@property
def timestamp(self):
"""Gets the timestamp of this GeOIpItem.
        Timestamp when the rule was created
:return: The timestamp of this GeOIpItem.
:rtype: int
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this GeOIpItem.
        Timestamp when the rule was created
:param timestamp: The timestamp of this GeOIpItem.
:type timestamp: int
"""
self._timestamp = timestamp
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GeOIpItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
82ae37234ed8faca25da728bce00c8bd915110f3 | c0ec2874527baba93c7c4107d1755326f800e470 | /tests/aws/lambda_/test_models.py | f900fb18d64ef56bcc9db65927fbb7bc0bdfc35a | [
"MIT"
] | permissive | koxudaxi/pydantic-collection | 10772ae426c38b07e58575e4849218a3fba29a6c | 6483517e9582109eafc3e2bc84e3a1a8660b9b81 | refs/heads/master | 2021-03-30T02:02:12.884974 | 2020-07-15T15:54:33 | 2020-07-15T15:54:33 | 248,004,593 | 4 | 0 | MIT | 2020-07-15T16:22:42 | 2020-03-17T15:25:07 | Python | UTF-8 | Python | false | false | 81 | py | from pydantic_collection.aws.lambda_ import models
def test_models():
pass
| [
"[email protected]"
] |