Dataset schema (one row per source file; ⌀ marks columns that may contain nulls):

| Column | Type | Observed range / classes |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–248 |
| max_stars_repo_name | string | length 5–125 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 ⌀ |
| max_issues_repo_path | string | length 3–248 |
| max_issues_repo_name | string | length 5–125 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 ⌀ |
| max_forks_repo_path | string | length 3–248 |
| max_forks_repo_name | string | length 5–125 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 ⌀ |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
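For orientation, here is a minimal sketch of how rows with this schema could be inspected once exported locally. The parquet filename is a placeholder and the score threshold is purely illustrative; only the column names come from the schema above.

```python
import pandas as pd

# Placeholder path: point this at an actual export of the dataset.
df = pd.read_parquet("python_files.parquet")

# Illustrative filter: keep files that are comparatively well documented.
subset = df[df["score_documentation"] > 0.25]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```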
hexsha: 05167a6a94f7c83fc6497eed1db4333dd9bd4308 | size: 12,980 | ext: py | lang: Python
repo (stars/issues/forks): weibospider.py @ Chiang97912/WeiboSpider, head 2c426d2dfa8c6d418b66bd54002c292194872c88, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-05-07T06:35:22.000Z – 2021-05-07T06:35:22.000Z)
content:
# -*- coding: UTF-8 -*-
import os
import json
import time
import rsa
import base64
import urllib
import binascii
import traceback
import requests
import pandas as pd
from lxml import etree
from datetime import datetime
class NoResultException(Exception):
def __init__(self):
super().__init__()
def __str__(self):
return 'No result'
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
class WeiboSpider(object):
def __init__(self, config):
self.year = config.year
self.month = config.month
self.day = config.day
self.query = config.query
self.config = config
self.weibo = list()
self.cookie = self.get_cookie()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0'
}
def get_cookie(self):
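# Sina SSO login flow (as implemented below): fetch the prelogin
# parameters (servertime, nonce, rsakv, pubkey), base64-encode the
# username, RSA-encrypt "servertime\tnonce\npassword" with the server's
# public key, then POST the login form to obtain the session cookies.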
data = {
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'qrcode_flag': 'false',
'useticket': '1',
'pagerefer': 'https://login.sina.com.cn/crossdomain2.php?action=logout&r=https%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl%3D%252F',
'wsseretry': 'servertime_error',
'vsnf': '1',
'su': '',
'service': 'miniblog',
'servertime': '1529058370',
'nonce': 'CPEDL5',
'pwencode': 'rsa2',
'rsakv': '1330428213',
'sp': '',
'sr': '1536*864',
'encoding': 'UTF-8',
'prelt': '75',
'url': 'https://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
'returntype': 'META'
}
username = self.config.username
password = self.config.password
pre_url = "http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=emhlZGFwYXQlNDAxNjMuY29t&rsakt=mod&client=ssologi"
s = requests.session()
res = s.get(pre_url)
res = res.text.split('(')[-1].split(')')[0]
pre_json = json.loads(res)
servertime = pre_json['servertime']
nonce = pre_json['nonce']
rsakv = pre_json['rsakv']
pubkey = pre_json['pubkey']
# base64.encodestring was removed in Python 3.9; encodebytes is the drop-in replacement
su = base64.encodebytes(urllib.parse.quote(
username).encode(encoding="utf-8"))[:-1]
# compute sp with rsa2: RSA-encrypt "servertime\tnonce\npassword" using the server's public key
rsaPubkey = int(pubkey, 16)
key = rsa.PublicKey(rsaPubkey, 65537)
message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)
sp = rsa.encrypt(message.encode(encoding="utf-8"), key)
sp = binascii.b2a_hex(sp)
data['servertime'] = servertime
data['nonce'] = nonce
data['rsakv'] = rsakv
data['su'] = su
data['sp'] = sp
url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)&wsseretry=servertime_error'
res = requests.post(url, data=data)
cookie = res.cookies.get_dict()
return cookie
def set_encoding(self, res):
'''
Handle the inconsistent encodings used by different Weibo pages.
'''
code = ['UTF-8', 'GBK']
for item in code:
if item in res.text:
res.encoding = item
break
def extract_digit(self, s):
if s:
return ''.join([x for x in s if x.isdigit()])
else:
return ''
def get_detail_info(self, url, weibo):
res = requests.get(url, headers=self.headers, cookies=self.cookie)
res.encoding = 'utf-8'
html = res.text
lines = html.splitlines() # splitlines() cuts the string on newline characters
weibo['gender'] = ''
weibo['location'] = ''
weibo['age'] = ''
for line in lines:
line = line.replace(r'\t', '')
line = line.replace(r'\n', '')
line = line.replace(r'\r', '')
if line.startswith('<script>FM.view({"ns":"pl.header.head.index","domid":"Pl_Official_Headerv6__1"'):
n = line.find('html":"')
if n > 0:
line = line[n + 7: -12].replace("\\", "") # strip all backslashes
if not line.find('<div class="search_noresult">') > 0:
parser = etree.HTML(line)
temp = parser.xpath(
'//*[@class="pf_username"]/span/a/i/@class')[0].split(' ')[1]
if temp == 'icon_pf_male':
weibo['gender'] = '男'
elif temp == 'icon_pf_female':
weibo['gender'] = '女'
if line.startswith('<script>FM.view({"ns":"pl.content.homeFeed.index","domid":"Pl_Core_UserInfo'):
n = line.find('html":"')
if n > 0:
line = line[n + 7: -12].replace("\\", "") # strip all backslashes
if not line.find('<div class="search_noresult">') > 0:
parser = etree.HTML(line)
# lv = parser.cssselect(
# '.W_icon_level > span')
# lv = lv[0].text[3:] if len(lv) > 0 else ''
# weibo['lv'] = lv # 等级
t = 1
flag1 = False
flag2 = False
while True:
try:
icon = parser.xpath(
'//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[1]/em/@class'.format(t))[0].split(' ')[1]
if icon == 'ficon_cd_place':
flag1 = True
weibo['location'] = parser.xpath(
'//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[2]'.format(t))[0].xpath('string(.)').strip()
elif icon == 'ficon_constellation':
flag2 = True
age_text = parser.xpath(
'//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[2]'.format(t))[0].xpath('string(.)').strip()
y = age_text.split('年')[0]
if y.isdigit():
weibo['age'] = datetime.now().year - int(y)
else:
weibo['age'] = ''
t += 1
except Exception as e:
break
if flag1 and flag2:
break
def get_one_page(self, html):
selecter = etree.HTML(html)
k = 1
while True:
weibo = dict()
try:
div = selecter.xpath('//*[@id="pl_feedlist_index"]/div[2]/div[{}]'.format(k))
if len(div) == 0:
break
name = selecter.xpath('//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/div[1]/div[2]/a'.format(k))
weibo['name'] = name[0].text.strip() if len(name) > 0 else ''
content = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/p[1]'.format(k))
weibo['content'] = content[0].xpath('string(.)').strip() if len(content) > 0 else ''
release_time = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/p[@class="from"]/a[1]'.format(k))
weibo['release_time'] = release_time[0].xpath('string(.)').strip() if len(release_time) > 0 else ''
transpond = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[2]/a'.format(k))
transpond = transpond[0].text if len(transpond) > 0 else ''
transpond = self.extract_digit(transpond)
if transpond:
weibo['transpond_num'] = transpond
else:
weibo['transpond_num'] = 0
comment = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[3]/a'.format(k))
comment = comment[0].text if len(comment) > 0 else ''
comment = self.extract_digit(comment)
if comment:
weibo['comment_num'] = comment
else:
weibo['comment_num'] = 0
thumbsup = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[4]/a/em'.format(k))
thumbsup = thumbsup[0].text if len(thumbsup) > 0 else ''
thumbsup = self.extract_digit(thumbsup)
if thumbsup:
weibo['thumbsup_num'] = thumbsup
else:
weibo['thumbsup_num'] = 0
homepage_url = selecter.xpath(
'//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/div[1]/div[2]/a[1]/@href'.format(k))
homepage_url = homepage_url[0] if len(homepage_url) > 0 else ''
if homepage_url:
h = homepage_url[2:].split('/')
if h[1] == 'u':
weibo['uid'] = h[2].split('?')[0]
else:
weibo['uid'] = h[1].split('?')[0]
homepage_url = 'https:' + homepage_url
self.get_detail_info(homepage_url, weibo)
except Exception:
traceback.print_exc() # print_exc() already writes the traceback; wrapping it in print() only emits "None"
break
k += 1
self.weibo.append(weibo)
def save(self):
columns_map = {
'name': '微博名称',
'location': '微博所在地',
'gender': '性别',
'content': '微博内容',
'transpond_num': '转发量',
'comment_num': '评论量',
'thumbsup_num': '点赞量',
'uid': '用户ID',
'age': '年龄',
'release_time': '发布时间'
}
df = pd.DataFrame(self.weibo)
df.rename(columns=columns_map, inplace=True)
columns = ['微博名称', '用户ID', '性别', '年龄', '微博所在地', '微博内容', '发布时间', '转发量', '评论量', '点赞量']
df.to_excel('./data/{}年{}月{}日.xlsx'.format(self.year, self.month, self.day), columns=columns)
def start(self):
page_index = 1
while True:
url = 'https://s.weibo.com/weibo?q={}&typeall=1&suball=1×cope=custom:{}-{}-{}-0:{}-{}-{}-23&Refer=g&page={}'.format(
self.query, self.year, str(self.month).zfill(2), str(self.day).zfill(2), self.year, str(self.month).zfill(2), str(self.day).zfill(2), page_index)
if page_index == 51:
break
try:
res = requests.get(url, headers=self.headers, cookies=self.cookie)
except Exception as e:
print(e)
page_index += 1
continue
self.set_encoding(res)
html = res.text
if '新浪通行证' in html:
self.cookie = self.get_cookie()
res = requests.get(url, headers=self.headers, cookies=self.cookie)
self.set_encoding(res)
html = res.text
print('cookie updated!')
print('Crawling data for {}-{}-{}, page {}'.format(self.year, self.month, self.day, page_index))
try:
self.get_one_page(html)
except NoResultException as e:
print(e)
break
time.sleep(0.5)
page_index += 1
self.save()
def main():
blacklist_file = 'blacklist.txt' # blacklist file: days that have already been crawled
config = {
'query': '共享单车', # search keyword ("shared bikes")
'start_month': 1, # start month
'start_day': 1, # start day
'username': 'xxxxxxxxxxxx', # account
'password': 'xxxxxxxxxxxx', # password
}
years = ['2018', '2019']
config = Config(**config)
if not os.path.exists(blacklist_file):
open(blacklist_file, 'w').close() # create the blacklist file if it does not exist
if not os.path.exists('./data'):
os.makedirs('./data')
for year in years:
for month in range(config.start_month, 13):
for day in range(config.start_day, 32):
with open(blacklist_file) as f:
blacklist = [line.strip() for line in f.readlines()]
if '{}-{}-{}'.format(year, month, day) in blacklist:
continue
config.year = year
config.month = month
config.day = day
ws = WeiboSpider(config)
ws.start()
with open(blacklist_file, 'a') as f:
f.write('{}-{}-{}\n'.format(year, month, day))
print("数据抓取并保存完成")
if __name__ == '__main__':
main()
avg_line_length: 39.938462 | max_line_length: 170 | alphanum_fraction: 0.469106 | count_classes: 11,800 (score 0.886152) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 3,565 (score 0.267723)
hexsha: 0516e5d4fd543c80d6f16ba01f4a7586b969a893 | size: 3,783 | ext: py | lang: Python
repo (stars/issues/forks): spoty/commands/get_second_group.py @ dy-sh/spoty, head 431a392707c8754da713871e0e7747bcc4417274, licenses ["MIT"]
max_stars_count: 2 (2022-02-01T16:49:32.000Z – 2022-03-02T18:30:31.000Z) | max_issues_count: null | max_forks_count: null
content:
from spoty.commands.first_list_commands import \
count_command, \
export_command, \
print_command
from spoty.commands.second_list_commands import \
filter_second_group, \
find_duplicates_second_command,\
find_deezer_second_group, \
find_spotify_second_group
from spoty.commands import get_group
from spoty.utils import SpotyContext
import click
@click.group("get")
@click.option('--spotify-playlist', '--sp', multiple=True,
help='Get tracks from Spotify playlist URI or ID.')
@click.option('--spotify-entire-library', '--s', multiple=True,
help='Get all tracks from Spotify library (by user URI or ID). To request a list for the current authorized user, use "me" as ID.')
@click.option('--spotify-entire-library-regex', '--sr', nargs=2, multiple=True,
help='Works the same as --spotify-entire-library, but you can specify regex filter which will be applied to playlists names. This way you can query any playlists by names.')
@click.option('--deezer-playlist', '--dp', multiple=True,
help='Get tracks from Deezer playlist URI or ID.')
@click.option('--deezer-entire-library', '--d', multiple=True,
help='Get all tracks from Deezer library (by user URI or ID). To request a list for the current authorized user, use "me" as ID.')
@click.option('--deezer-entire-library-regex', '--dr', nargs=2, multiple=True,
help='Works the same as --deezer-entire-library, but you can specify regex filter which will be applied to playlists names. This way you can query any playlists by names.')
@click.option('--audio', '--a', multiple=True,
help='Get audio files located at the specified local path. You can specify the audio file name as well.')
@click.option('--csv', '--c', multiple=True,
help='Get tracks from csv playlists located at the specified local path. You can specify the csv file name as well.')
@click.option('--m3u8', '--m', multiple=True,
help='Get tracks from m3u8 playlists located at the specified local path. You can specify the m3u8 file name as well.')
@click.option('--no-recursive', '-r', is_flag=True,
help='Do not search in subdirectories from the specified path.')
@click.pass_obj
def get_second(context: SpotyContext,
spotify_playlist,
spotify_entire_library,
spotify_entire_library_regex,
deezer_playlist,
deezer_entire_library,
deezer_entire_library_regex,
audio,
csv,
m3u8,
no_recursive
):
"""
Collect second list of tracks for further actions (see next commands).
"""
context.summary.append("Collecting second list:")
get_group.get_tracks_wrapper(context,
spotify_playlist,
spotify_entire_library,
spotify_entire_library_regex,
deezer_playlist,
deezer_entire_library,
deezer_entire_library_regex,
audio,
csv,
m3u8,
no_recursive,
)
get_second.add_command(filter_second_group.filter_second)
get_second.add_command(count_command.count_tracks)
get_second.add_command(print_command.print_tracks)
get_second.add_command(export_command.export_tracks)
get_second.add_command(find_duplicates_second_command.find_duplicates_second)
get_second.add_command(find_deezer_second_group.find_deezer)
get_second.add_command(find_spotify_second_group.find_spotify)
avg_line_length: 50.44 | max_line_length: 187 | alphanum_fraction: 0.641819 | count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 2,990 (score 0.790378) | count_async_functions: 0 (score 0) | count_documentation: 1,410 (score 0.37272)
hexsha: 05195432ec2c13cb2bd586385c70cb0f3fcc21ab | size: 19,804 | ext: py | lang: Python
repo (stars/issues/forks): jenkins_job_wrecker/modules/triggers.py @ romanek-adam/jenkins-job-wrecker, head db9379d852afe8b621c7688d34fd057d916de8f2, licenses ["MIT"]
max_stars_count: 1 (2020-06-05T06:36:50.000Z – 2020-06-05T06:36:50.000Z) | max_issues_count: 15 (2020-05-18T07:37:06.000Z – 2020-08-24T09:16:08.000Z) | max_forks_count: null
content:
# encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, Mapper
class Triggers(jenkins_job_wrecker.modules.base.Base):
component = 'triggers'
def gen_yml(self, yml_parent, data):
triggers = []
for child in data:
object_name = child.tag.split('.')[-1].lower()
self.registry.dispatch(self.component, object_name, child, triggers)
yml_parent.append(['triggers', triggers])
def scmtrigger(top, parent):
pollscm = {}
for child in top:
if child.tag == 'spec':
pollscm['cron'] = child.text
elif child.tag == 'ignorePostCommitHooks':
pollscm['ignore-post-commit-hooks'] = (child.text == 'true')
else:
raise NotImplementedError('cannot handle scm trigger '
'setting %s' % child.tag)
parent.append({'pollscm': pollscm})
def timertrigger(top, parent):
parent.append({'timed': top[0].text})
def reversebuildtrigger(top, parent):
reverse = {}
for child in top:
if child.tag == 'upstreamProjects':
reverse['jobs'] = child.text
elif child.tag == 'threshold':
pass # TODO
elif child.tag == 'spec':
pass # TODO
else:
raise NotImplementedError('cannot handle reverse trigger '
'setting %s' % child.tag)
parent.append({'reverse': reverse})
def __gerrit_process_file_paths(attribute):
file_paths = []
for file_path_type in attribute:
if file_path_type.tag == "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.FilePath":
file_path = {}
for file_path_attribute in file_path_type:
if file_path_attribute.tag == "compareType":
file_path["compare-type"] = file_path_attribute.text
elif file_path_attribute.tag == "pattern":
file_path["pattern"] = file_path_attribute.text
file_paths.append(file_path)
else:
raise NotImplementedError("Not implemented file path type: ", file_path_type.tag)
return file_paths
def __gerrit_process_gerrit_projects(child):
projects = []
for gerrit_project in child:
project = {}
for attribute in gerrit_project:
if attribute.tag == "compareType":
project["project-compare-type"] = attribute.text
elif attribute.tag == "pattern":
project["project-pattern"] = attribute.text
elif attribute.tag == "branches":
branches = []
for branch_type in attribute:
if branch_type.tag == \
"com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Branch":
branch = {}
for branch_attribute in branch_type: # iterate this branch element, not always the first one
if branch_attribute.tag == "compareType":
branch["branch-compare-type"] = branch_attribute.text
elif branch_attribute.tag == "pattern":
branch["branch-pattern"] = branch_attribute.text
else:
raise NotImplementedError("Not implemented branch attribute: ",
branch_attribute.tag)
branches.append(branch)
else:
raise NotImplementedError("Not implemented branch type: ", branch_type.tag)
project["branches"] = branches
elif attribute.tag == "disableStrictForbiddenFileVerification":
project["disable-strict-forbidden-file-verification"] = get_bool(attribute.text)
elif attribute.tag == "filePaths":
file_paths = __gerrit_process_file_paths(attribute)
project["file-paths"] = file_paths
elif attribute.tag == "forbiddenFilePaths":
forbidden_file_paths = __gerrit_process_file_paths(attribute)
project["forbidden-file-paths"] = forbidden_file_paths
elif attribute.tag == "topics":
topics = []
for topic in attribute:
if topic.tag == \
"com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Topic":
topic_keys = {}
for topic_attribute in topic:
if topic_attribute.tag == "compareType":
topic_keys["compare-type"] = topic_attribute.text
elif topic_attribute.tag == "pattern":
topic_keys["pattern"] = topic_attribute.text
else:
raise NotImplementedError("Not implemented topic attribute: ", topic_attribute.tag)
topics.append(topic_keys)
else:
raise NotImplementedError("Not implemented topic type: ", topic.tag)
project["topics"] = topics
else:
raise NotImplementedError("Not implemented attribute: ", attribute.tag)
projects.append(project)
return projects
def __gerrit_process_trigger_on_events(child):
trigger_on = []
sonyericsson_prefix = "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.events."
for event in child:
if event.tag == sonyericsson_prefix + "PluginChangeAbandonedEvent":
trigger_on.append("change-abandoned-event")
elif event.tag == sonyericsson_prefix + "PluginChangeMergedEvent":
trigger_on.append("change-merged-event")
elif event.tag == sonyericsson_prefix + "PluginChangeRestoredEvent":
trigger_on.append("change-restored-event")
elif event.tag == sonyericsson_prefix + "PluginCommentAddedEvent":
comment_added_event = {}
for element in event:
if element.tag == "verdictCategory":
comment_added_event["approval-category"] = element.text
elif element.tag == "commentAddedTriggerApprovalValue":
comment_added_event["approval-value"] = element.text
trigger_on.append({"comment-added-event": comment_added_event})
elif event.tag == sonyericsson_prefix + "PluginCommentAddedContainsEvent":
trigger_on.append({"comment-added-contains-event": {"comment-contains-value": event[0].text}})
elif event.tag == sonyericsson_prefix + "PluginDraftPublishedEvent":
trigger_on.append("draft-published-event")
elif event.tag == sonyericsson_prefix + "PluginPatchsetCreatedEvent":
patchset_created_event = {}
for attribute in event:
if attribute.tag == "excludeDrafts":
patchset_created_event["exclude-drafts"] = get_bool(attribute.text)
elif attribute.tag == "excludeTrivialRebase":
patchset_created_event["exclude-trivial-rebase"] = get_bool(attribute.text)
elif attribute.tag == "excludeNoCodeChange":
patchset_created_event["exclude-no-code-change"] = get_bool(attribute.text)
elif attribute.tag == "excludePrivateState":
patchset_created_event["exclude-private"] = get_bool(attribute.text)
elif attribute.tag == "excludeWipState":
patchset_created_event["exclude-wip"] = get_bool(attribute.text)
trigger_on.append({"patchset-created-event": patchset_created_event})
elif event.tag == sonyericsson_prefix + "PluginPrivateStateChangedEvent":
trigger_on.append("private-state-changed-event")
elif event.tag == sonyericsson_prefix + "PluginRefUpdatedEvent":
trigger_on.append("ref-updated-event")
elif event.tag == sonyericsson_prefix + "PluginTopicChangedEvent":
trigger_on.append("topic-changed-event")
elif event.tag == sonyericsson_prefix + "PluginWipStateChangedEvent":
trigger_on.append("wip-state-changed-event")
return trigger_on
def gerrittrigger(top, parent):
mapper = Mapper({
"silentMode": ("silent", bool),
"silentStartMode": ("silent-start", bool),
"escapeQuotes": ("escape-quotes", bool),
"dependencyJobsNames": ("dependency-jobs", str),
"nameAndEmailParameterMode": ("name-and-email-parameter-mode", str),
"commitMessageParameterMode": ("commit-message-parameter-mode", str),
"changeSubjectParameterMode": ("change-subject-parameter-mode", str),
"commentTextParameterMode": ("comment-text-parameter-mode", str),
"buildStartMessage": ("start-message", str),
"buildFailureMessage": ("failure-message", str),
"buildSuccessfulMessage": ("successful-message", str),
"buildUnstableMessage": ("unstable-message", str),
"buildNotBuiltMessage": ("notbuilt-message", str),
"buildUnsuccessfulFilepath": ("failure-message-file", str),
"customUrl": ("custom-url", str),
"serverName": ("server-name", str),
"dynamicTriggerConfiguration": ("dynamic-trigger-enabled", bool),
"triggerConfigURL": ("dynamic-trigger-url", str),
})
mapper_gerrit_build = Mapper({
"gerritBuildStartedVerifiedValue": ("gerrit-build-started-verified-value", int),
"gerritBuildStartedCodeReviewValue": ("gerrit-build-started-codereview-value", int),
"gerritBuildSuccessfulVerifiedValue": ("gerrit-build-successful-verified-value", int),
"gerritBuildSuccessfulCodeReviewValue": ("gerrit-build-successful-codereview-value", int),
"gerritBuildFailedVerifiedValue": ("gerrit-build-failed-verified-value", int),
"gerritBuildFailedCodeReviewValue": ("gerrit-build-failed-codereview-value", int),
"gerritBuildUnstableVerifiedValue": ("gerrit-build-unstable-verified-value", int),
"gerritBuildUnstableCodeReviewValue": ("gerrit-build-unstable-codereview-value", int),
"gerritBuildNotBuiltVerifiedValue": ("gerrit-build-notbuilt-verified-value", int),
"gerritBuildNotBuiltCodeReviewValue": ("gerrit-build-notbuilt-codereview-value", int)
})
gerrit_trigger = {}
is_override_votes = False
for child in top:
if mapper.map_element(child, gerrit_trigger):
pass # Handled by the mapper.
elif mapper_gerrit_build.map_element(child, gerrit_trigger):
# Jenkins Job Builder implementation uses "override-votes"
# key to override default vote values. For detail:
# https://docs.openstack.org/infra/jenkins-job-builder/triggers.html#triggers.gerrit
is_override_votes = True
elif child.tag == "gerritProjects":
gerrit_trigger["projects"] = __gerrit_process_gerrit_projects(child)
elif child.tag == "dynamicGerritProjects":
pass # No implementation by JJB
elif child.tag == "spec":
pass # Not needed in yml
elif child.tag == "skipVote":
skip_vote = {}
for attribute in child:
if attribute.tag == "onSuccessful":
skip_vote["successful"] = get_bool(attribute.text)
if attribute.tag == "onFailed":
skip_vote["failed"] = get_bool(attribute.text)
if attribute.tag == "onUnstable":
skip_vote["unstable"] = get_bool(attribute.text)
if attribute.tag == "onNotBuilt":
skip_vote["notbuilt"] = get_bool(attribute.text)
gerrit_trigger["skip-vote"] = skip_vote
elif child.tag == "notificationLevel":
if child.text is None:
gerrit_trigger["notification-level"] = "SERVER_DEFAULT"
else:
gerrit_trigger["notification-level"] = child.text
elif child.tag == "triggerOnEvents":
gerrit_trigger["trigger-on"] = __gerrit_process_trigger_on_events(child)
elif child.tag == "gerritTriggerTimerTask":
pass # Unconfigurable Attribute
elif child.tag == "triggerInformationAction":
pass # Unconfigurable Attribute
else:
raise NotImplementedError("Not implemented Gerrit Trigger Plugin's attribute: ", child.tag)
gerrit_trigger["override-votes"] = is_override_votes
parent.append({'gerrit': gerrit_trigger})
def githubpushtrigger(top, parent):
parent.append('github')
def ghprbtrigger(top, parent):
ghpr = {}
for child in top:
if child.tag == 'spec' or child.tag == 'cron':
ghpr['cron'] = child.text
elif child.tag == 'configVersion':
pass # Not needed
elif child.tag == 'adminlist':
if child.text:
ghpr['admin-list'] = child.text.strip().split('\n')
else:
ghpr['admin-list'] = []
elif child.tag == 'allowMembersOfWhitelistedOrgsAsAdmin':
ghpr['allow-whitelist-orgs-as-admins'] = get_bool(child.text)
elif child.tag == 'whitelist':
if child.text:
ghpr['white-list'] = child.text.strip().split('\n')
else:
ghpr['white-list'] = []
elif child.tag == 'orgslist':
if child.text:
ghpr['org-list'] = child.text.strip().split('\n')
else:
ghpr['org-list'] = []
elif child.tag == 'buildDescTemplate':
ghpr['build-desc-template'] = child.text
elif child.tag == 'triggerPhrase':
ghpr['trigger-phrase'] = child.text
elif child.tag == 'onlyTriggerPhrase':
ghpr['only-trigger-phrase'] = get_bool(child.text)
elif child.tag == 'useGitHubHooks':
ghpr['github-hooks'] = get_bool(child.text)
elif child.tag == 'permitAll':
ghpr['permit-all'] = get_bool(child.text)
elif child.tag == 'autoCloseFailedPullRequests':
ghpr['auto-close-on-fail'] = get_bool(child.text)
elif child.tag == 'blackListCommitAuthor':
if child.text:
ghpr['black-list-commit-author'] = child.text.strip().split(' ')
else:
ghpr['black-list-commit-author'] = []
elif child.tag == 'blackListLabels':
if child.text:
ghpr['black-list-labels'] = child.text.strip().split('\n')
else:
ghpr['black-list-labels'] = []
elif child.tag == 'blackListTargetBranches':
ghpr['black-list-target-branches'] = [item[0].text.strip() for item in child if item[0].text is not None]
elif child.tag == 'displayBuildErrorsOnDownstreamBuilds':
ghpr['display-build-errors-on-downstream-builds'] = get_bool(child.text)
elif child.tag == 'excludedRegions':
if child.text:
ghpr['excluded-regions'] = child.text.strip().split('\n')
else:
ghpr['excluded-regions'] = []
elif child.tag == 'includedRegions':
if child.text:
ghpr['included-regions'] = child.text.strip().split('\n')
else:
ghpr['included-regions'] = []
elif child.tag == 'skipBuildPhrase':
ghpr['skip-build-phrase'] = child.text
elif child.tag == 'whiteListLabels':
if child.text:
ghpr['white-list-labels'] = child.text.strip().split('\n')
else:
ghpr['white-list-labels'] = []
elif child.tag == 'whiteListTargetBranches':
ghpr['white-list-target-branches'] = [item[0].text.strip() for item in child if item[0].text is not None]
elif child.tag == 'gitHubAuthId':
ghpr['auth-id'] = child.text
elif child.tag == 'extensions':
extensions_prefix = "org.jenkinsci.plugins.ghprb.extensions."
for extension in child:
if extension.tag == extensions_prefix+"status.GhprbSimpleStatus":
for extension_child in extension:
if extension_child.tag == "commitStatusContext":
ghpr['status-context'] = extension_child.text
elif extension_child.tag == "triggeredStatus":
ghpr['triggered-status'] = extension_child.text
elif extension_child.tag == "startedStatus":
ghpr['started-status'] = extension_child.text
elif extension_child.tag == "statusUrl":
ghpr['status-url'] = extension_child.text
elif extension_child.tag == "addTestResults":
ghpr['status-add-test-results'] = get_bool(extension_child.text)
elif extension_child.tag == "completedStatus":
for status in extension_child:
if status[1].text == "SUCCESS":
ghpr['success-status'] = status[0].text
elif status[1].text == "FAILURE":
ghpr['failure-status'] = status[0].text
elif status[1].text == "ERROR":
ghpr['error-status'] = status[0].text
else:
raise NotImplementedError("GHPRB status %s is not implemented."
% status[1].text)
else:
raise NotImplementedError("GHPRB simple status type %s is not implemented."
% extension_child.tag)
elif extension.tag == extensions_prefix+"comments.GhprbBuildStatus":
for extension_child in extension:
if extension_child.tag == "messages":
for message in extension_child:
if message[1].text == "SUCCESS":
ghpr['success-comment'] = message[0].text
elif message[1].text == "FAILURE":
ghpr['failure-comment'] = message[0].text
elif message[1].text == "ERROR":
ghpr['error-comment'] = message[0].text
else:
raise NotImplementedError("GHPRB message %s is not implemented." % message[0].text)
else:
raise NotImplementedError("GHPRB extension type %s is not implemented."
% extension_child.tag)
elif extension.tag == extensions_prefix+"build.GhprbCancelBuildsOnUpdate":
ghpr['cancel-builds-on-update'] = True
elif extension.tag == extensions_prefix+"comments.GhprbCommentFile":
ghpr['comment-file'] = extension[0].text
elif extension.tag == extensions_prefix+"status.GhprbNoCommitStatus":
ghpr['no-commit-status'] = True
else:
raise NotImplementedError("GHPRB extension %s is not implemented." % extension.tag)
else:
raise NotImplementedError("GHPRB tag %s is not implemented." % child.tag)
parent.append({'github-pull-request': ghpr})
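As a quick illustration of the conversion pattern these handlers share, here is a hedged sketch of feeding a minimal Jenkins timer-trigger XML element through `timertrigger` above. The XML snippet is invented for the example, and running it standalone assumes `jenkins_job_wrecker` and its helpers are importable.

```python
import xml.etree.ElementTree as ET

# A minimal <hudson.triggers.TimerTrigger> element with a cron spec.
top = ET.fromstring(
    "<hudson.triggers.TimerTrigger><spec>H 2 * * *</spec></hudson.triggers.TimerTrigger>"
)
triggers = []
timertrigger(top, triggers)  # handler defined above
print(triggers)              # [{'timed': 'H 2 * * *'}]
```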
avg_line_length: 51.572917 | max_line_length: 119 | alphanum_fraction: 0.569683 | count_classes: 362 (score 0.018279) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 6,048 (score 0.305393)
hexsha: 051d3484ddd9be778a5ba470d36fedfb5de63393 | size: 4,097 | ext: py | lang: Python
repo (stars/issues/forks): tools/clean-parallel.py @ ZJaume/clean, head 0c3c6bab8bf173687ec0bba6908097ef7bc38db2, licenses ["MIT"]
max_stars_count: 1 (2021-06-02T03:08:32.000Z – 2021-06-02T03:08:32.000Z) | max_issues_count: 1 (2021-05-30T22:55:44.000Z – 2021-06-02T08:47:56.000Z) | max_forks_count: 2 (2021-06-01T19:07:43.000Z – 2021-06-03T11:03:04.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import regex
import argparse
# The variables below need to be adjusted for a language pair and dataset.
# To add a new language, define the list of alpha characters in the dict below.
MIN_LENGTH = 1 # minimum number of words in a sentence
MAX_LENGTH = 200 # maximum number of words in a sentence
RATIO_LENGTH = 0.3 # maximum length difference between the source and target sentence
RATIO_ALPHA_WORDS = 0.4 # minimum fraction of "real" words in a source sentence
RATIO_ALPHA_CHARS = 0.5 # minimum fraction of alpha characters in a source sentence
CHARS = {
'bg': r'[АаБбВвГгДддЕеЖжЗзИиЙйКкkasЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЬьЮюЯя]',
'cs': r'[a-zÁáČčĎďÉéěÍíŇňÓóŘřŠšŤťÚúůÝýŽž]',
'ca': r'[a-zÀàÈèÉéÍíÒòÓóÚúÇç]',
'da': r'[a-zÆæØøÅå]',
'de': r'[a-zÄäÖöÜüß]',
'en': r'[a-z]',
'el': r'[a-zΑαΒβΓγΔδΕεΖζΗηΘθΙιΚκΛλΜμΝνΞξΟοΠπΡρΣσςΤτΥυΦφΧχΨψΩω]',
'es': r'[a-zÁáÉéÍíÓóÚúñÑ]',
'et': r'[a-zÕõÄäÖöÜü]',
'eu': r'[a-zñÑ]',
'fi': r'[a-zÅåÄäÖö]',
'fr': r'[a-zÂâÁáÀàâÇçÉéÈèÊêÓóÒòÔôŒœÜüÛûŸÿ]',
'ga': r'[abcdefghilmnoprstuáéíóúÁÉÍÓÚ]',
'gl': r'[a-zÁáÉéÍíÓóÚúÑñ]',
'hr': r'[abcčČćĆdđĐefghijklmnoprsšŠtuvzžŽ]',
'hu': r'[a-zÁáÉéÍíÓóÖöŐőŰű]',
'is': r'[abdefghijklmnoprstuvxyÁáðÐÉéÍíÓóÚúÝýÞþÆæÖö]',
'it': r'[a-zàÀèÈéÉìÌíÍîÎòÒóÓùÙúÚ]',
'lt': r'[aąbcČčdeĘęĖėfghiĮįyjklmnoprsŠštuŲųŪūvzŽž]',
'lv': r'[aĀābcČčdeĒēfgĢģhiĪījkĶķlĻļmnŅņoprsŠštuŪūvzŽž]',
'mt': r'[abĊċdefĠġghĦħiiejklmnopqrstuvwxŻżz]',
'nb': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'nl': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÚú]',
'no': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'nn': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'pl': r'[a-zĄąĆćĘꣳŃńÓóŚśŹźŻż]',
'ro': r'[a-zĂăÂâÎîȘșȚț]',
'sk': r'[a-záäÁÄčČďĎžéÉíÍĺĹľĽňŇóÓôÔŕŔšŠťŤúÚýÝžŽ]',
'sl': r'[abcčČdđĐefghijklmnoprsšŠtuvzžŽ]',
'sv': r'[a-zÅåÄäÖö]',
}
middle_period = regex.compile(r'\w+[\.\?\!] \p{Lu}\w*,? ')
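# middle_period matches "<word><.?!> <Capitalized word>", i.e. a likely
# sentence boundary inside the segment; clean_parallel() below requires the
# source and target to contain the same number of such boundaries.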
def main():
args = parse_user_args()
for i, line in enumerate(sys.stdin):
fields = line.strip().split('\t')
if len(fields) < 2:
continue
src = fields[-2].strip()
trg = fields[-1].strip()
skip = clean_parallel(src, trg, args.src_lang, args.trg_lang)
if skip:
if args.debug:
sys.stderr.write("{}\t{}".format(skip, line))
continue
sys.stdout.write(line)
def clean_parallel(src, trg, src_lang, trg_lang):
if src.lower() == trg.lower():
return "IDENTICAL"
src_toks = src.split()
trg_toks = trg.split()
src_len = len(src_toks)
trg_len = len(trg_toks)
if not src_len or not trg_len:
return "EMPTY"
ratio_len = src_len / float(trg_len)
if ratio_len < RATIO_LENGTH or ratio_len > (1. / RATIO_LENGTH):
return "RATIO_LENGTH"
if src_len < MIN_LENGTH or trg_len < MIN_LENGTH:
return "TOO_SHORT"
if src_len > MAX_LENGTH or trg_len > MAX_LENGTH:
return "TOO_LONG"
num_alpha = sum(
[1 if re.match(CHARS[src_lang], t, re.IGNORECASE) else 0 for t in src_toks])
if num_alpha / float(src_len) < RATIO_ALPHA_WORDS:
return "RATIO_ALPHA"
char_alpha = len(re.findall(CHARS[src_lang], src, re.IGNORECASE))
if char_alpha / float(len(src.replace(' ', ''))) < RATIO_ALPHA_CHARS:
return "RATIO_CHARS"
if len(middle_period.findall(src)) != len(middle_period.findall(trg)):
return "MIDDLE_PERIOD"
if src_lang in CHARS and trg_lang in CHARS:
if (src[0].isalpha() and not src[0].isupper() and (len(src)>1 and src[1]!=')')) \
or (trg[0].isalpha() and not trg[0].isupper() and (len(trg)>1 and trg[1]!=')')):
return "START_CAPITAL"
return None
def parse_user_args():
parser = argparse.ArgumentParser()
parser.add_argument("-l1", "--src-lang", default='es')
parser.add_argument("-l2", "--trg-lang", default='en')
parser.add_argument("--debug", action='store_true')
return parser.parse_args()
if __name__ == "__main__":
main()
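A hedged invocation sketch for the filter above. It assumes the script is saved at tools/clean-parallel.py and that its `regex` dependency is installed; the sample pairs are placeholders chosen so that one passes every rule and one is rejected.

```python
import subprocess

# Two tab-separated source/target pairs on stdin.
sample = "Hola mundo\tHello world\nhi\thi\n"
out = subprocess.run(
    ["python", "tools/clean-parallel.py", "-l1", "es", "-l2", "en", "--debug"],
    input=sample, capture_output=True, text=True,
)
# Only the first pair survives; the second is rejected as IDENTICAL,
# and --debug echoes the rejection reason on stderr.
print(out.stdout)
```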
avg_line_length: 32.515873 | max_line_length: 96 | alphanum_fraction: 0.640469 | count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 2,253 (score 0.486294)
hexsha: 051e064cf78fe1b3efaa1e563322f576984f94e9 | size: 24,624 | ext: py | lang: Python
repo (stars/issues/forks): rubika/client.py @ Bahman-Ahmadi/rubika, head 924e82434f9468cadf481af7b29695f642af7e99, licenses ["MIT"]
max_stars_count: 23 (2021-12-06T09:54:01.000Z – 2022-03-31T19:44:29.000Z) | max_issues_count: 4 (2022-01-08T19:27:40.000Z – 2022-03-30T13:18:23.000Z) | max_forks_count: 13 (2021-12-08T14:18:39.000Z – 2022-03-30T13:20:37.000Z)
content:
from pathlib import Path
from requests import post
from random import randint
from json import loads, dumps
import math, random, datetime, rubika.encryption
# these access-name maps must exist at module level (used when changing admin/member permissions)
adminsAccess = {
"pin":"PinMessages",
"newAdmin":"SetAdmin",
"editInfo":"ChangeInfo",
"banMember":"BanMember",
"changeLink":"SetJoinLink",
"changeMembersAccess":"SetMemberAccess",
"deleteMessages":"DeleteGlobalAllMessages"
}
usersAccess = {
"addMember":"AddMember",
"viewAdmins":"ViewAdmins",
"viewMembers":"ViewMembers",
"sendMessage":"SendMessages"
}
class Bot:
def __init__(self, auth):
self.auth = auth
self.enc = rubika.encryption.encryption(auth)
@staticmethod
def _getURL():
return "https://messengerg2c64.iranlms.ir/"
# Disabled alternative: pick a random API endpoint.
# result = []
# for i in range(11, 99): result.append(f"https://messengerg2c{i}.iranlms.ir/")
# return random.choice(result)
def _requestSendFile(self, file):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"requestSendFile",
"input":{
"file_name": str(file.split("/")[-1]),
"mime": file.split(".")[-1],
"size": Path(file).stat().st_size
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json()["data_enc"]))["data"]
def _uploadFile(self, file):
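# Upload to the URL returned by requestSendFile. Files over 128 KiB
# (131072 bytes) are sent in sequential parts with part-number /
# total-part headers; the server returns access_hash_rec with the last part.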
frequest = Bot._requestSendFile(self, file)
bytef = open(file,"rb").read()
hash_send = frequest["access_hash_send"]
file_id = frequest["id"]
url = frequest["upload_url"]
header = {
'auth':self.auth,
'Host':url.replace("https://","").replace("/UploadFile.ashx",""),
'chunk-size':str(Path(file).stat().st_size),
'file-id':str(file_id),
'access-hash-send':hash_send,
"content-type": "application/octet-stream",
"content-length": str(Path(file).stat().st_size),
"accept-encoding": "gzip",
"user-agent": "okhttp/3.12.1"
}
if len(bytef) <= 131072:
header["part-number"], header["total-part"] = "1","1"
while True:
try:
j = post(data=bytef,url=url,headers=header).text
j = loads(j)['data']['access_hash_rec']
break
except Exception as e:
continue
return [frequest, j]
else:
t = math.floor(len(bytef) / 131072 + 1) # number of 128 KiB parts; math.floor replaces the private random._floor helper
for i in range(1,t+1):
if i != t:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = "131072", str(i),str(t)
o = post(data=bytef[k:k + 131072],url=url,headers=header).text
o = loads(o)['data']
break
except Exception as e:
continue
else:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = str(len(bytef[k:])), str(i),str(t)
p = post(data=bytef[k:],url=url,headers=header).text
p = loads(p)['data']['access_hash_rec']
break
except Exception as e:
continue
return [frequest, p]
def sendMessage(self, chat_id, text, metadata=[], message_id=None):
inData = {
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,999999999)}",
"text":text,
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}
if metadata != [] : inData["input"]["metadata"] = {"meta_data_parts":metadata}
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps(inData))},url=Bot._getURL())
def editMessage(self, message_id, chat_id, newText):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"editMessage",
"input":{
"message_id": message_id,
"object_guid": chat_id,
"text": newText
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def deleteMessages(self, chat_id, message_ids):
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"deleteMessages",
"input":{
"object_guid":chat_id,
"message_ids":message_ids,
"type":"Global"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def getUserInfo(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getUserInfo",
"input":{
"user_guid":chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json()["data_enc"]))
def getMessages(self, chat_id,min_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesInterval",
"input":{
"object_guid":chat_id,
"middle_message_id":min_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc"))).get("data").get("messages")
def getInfoByUsername(self, username):
''' username should be without @ '''
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getObjectByUsername",
"input":{
"username":username
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc")))
def banGroupMember(self, chat_id, user_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"banGroupMember",
"input":{
"group_guid": chat_id,
"member_guid": user_id,
"action":"Set"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def invite(self, chat_id, user_ids):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"addGroupMembers",
"input":{
"group_guid": chat_id,
"member_guids": user_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def getGroupAdmins(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"client":{
"app_name":"Main",
"app_version":"2.9.5",
"lang_code":"fa",
"package":"ir.resaneh1.iptv",
"platform":"Android"
},
"input":{
"group_guid":chat_id
},
"method":"getGroupAdminMembers"
}))},url=Bot._getURL()).json().get("data_enc")))
def getMessagesInfo(self, chat_id, message_ids):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesByID",
"input":{
"object_guid": chat_id,
"message_ids": message_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url=Bot._getURL()).json()["data_enc"])).get("data").get("messages")
def setMembersAccess(self, chat_id, access_list):
return post(json={
"api_version": "4",
"auth": self.auth,
"client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"access_list": access_list,
"group_guid": chat_id
})),
"method": "setGroupDefaultAccess"
}, url=Bot._getURL())
def getGroupMembers(self, chat_id):
return loads(self.enc.decrypt(post(json={
"api_version":"5",
"auth": self.auth,
"data_enc": self.enc.encrypt(dumps({
"method":"getGroupAllMembers",
"input":{
"group_guid": chat_id,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))
}, url=Bot._getURL()).json()["data_enc"]))["data"]["in_chat_members"]
def getGroupInfo(self, chat_id):
return loads(self.enc.decrypt(post(
json={
"api_version":"5",
"auth": self.auth,
"data_enc": self.enc.encrypt(dumps({
"method":"getGroupInfo",
"input":{
"group_guid": chat_id,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url=Bot._getURL()).json()["data_enc"]))
def getGroupLink(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getGroupLink",
"input":{
"group_guid":chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc"))).get("data").get("join_link")
def changeGroupLink(self, chat_id):
return post(json={
"api_version":"4",
"auth":self.auth,
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"lang_code":"fa",
"package":"ir.resaneh1.iptv",
"platform":"Android"
},
"data_enc":self.enc.encrypt(dumps({
"group_guid": chat_id
})),
"method":"setGroupLink",
},url=Bot._getURL())
def setGroupTimer(self, chat_id, time):
return post(json={
"api_version":"4",
"auth":self.auth,
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"platform":"Android",
"package":"ir.resaneh1.iptv",
"lang_code":"fa"
},
"data_enc":self.enc.encrypt(dumps({
"group_guid": chat_id,
"slow_mode": time,
"updated_parameters":["slow_mode"]
})),
"method":"editGroupInfo"
},url=Bot._getURL())
def setGroupAdmin(self, chat_id, user_id, access_list=[]):
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"setGroupAdmin",
"input":{
"group_guid": chat_id,
"access_list": access_list,
"action": "SetAdmin",
"member_guid": user_id
},
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"platform":"Android",
"package":"ir.resaneh1.iptv",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def deleteGroupAdmin(self, chat_id, user_id, access_list=[]):
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"setGroupAdmin",
"input":{
"group_guid": chat_id,
"action": "UnsetAdmin",
"member_guid": user_id
},
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"platform":"Android",
"package":"ir.resaneh1.iptv",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def logout(self):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"logout",
"input":{},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def forwardMessages(self, From, message_ids, to):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"forwardMessages",
"input":{
"from_object_guid": From,
"message_ids": message_ids,
"rnd": f"{randint(100000,999999999)}",
"to_object_guid": to
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def seenChats(self, seenList):
# seenList should be a dict mapping object guids to the last message's id: {"guid": "msg_id"}
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"seenChats",
"input":{
"seen_list": seenList
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def sendChatAction(self, chat_id, action):
# this request should be sent every few seconds while typing, before the message itself is sent
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendChatActivity",
"input":{
"activity": action,
"object_guid": chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def pin(self, chat_id, message_id):
return post(json={"api_version": "4", "auth": self.auth, "client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"action":"Pin",
"message_id": message_id,
"object_guid": chat_id
})),
"method": "setPinMessage"
},url=Bot._getURL())
def unpin(self, chat_id, message_id):
return post(json={"api_version": "4", "auth": self.auth, "client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"action":"Unpin",
"message_id": message_id,
"object_guid": chat_id
})),
"method": "setPinMessage"
},url=Bot._getURL())
def joinGroup(self, link):
hashLink = link.split("/")[-1]
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"joinGroup",
"input":{
"hash_link": hashLink
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def leaveGroup(self, chat_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"leaveGroup",
"input":{
"group_guid": chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def block(self, chat_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"setBlockUser",
"input":{
"action": "Block",
"user_guid": chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def unblock(self, chat_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"setBlockUser",
"input":{
"action": "Unblock",
"user_guid": chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL())
def sendPhoto(self, chat_id, file, size, thumbnail=None, caption=None, message_id=None):
uresponse = Bot._uploadFile(self, file)
file_inline = {
"dc_id": uresponse[0]["dc_id"],
"file_id": uresponse[0]["id"],
"type":"Image",
"file_name": file.split("/")[-1],
"size": str(Path(file).stat().st_size),
"mime": file.split(".")[-1],
"access_hash_rec": uresponse[1],
"width": size[0],
"height": size[1],
"thumb_inline": thumbnail or "iVBORw0KGgoAAAANSUhEUgAAABwAAAAoCAYAAADt5povAAAAAXNSR0IArs4c6QAABZZJREFUWEftl2tMVEcUgM+Ze3fv7rLLCvLwxaNB0VpJCWqNIgqV+gpNLImxiTZoTZNa5YdpGi211aSJSdOkSU1qaorV2D/90TapJNrYVGttKKBgqYiioLLoWmAXQdjHfcyc5uKSoFlhFxp/NJ3N5mZnZ84359zzGoRnPPAZ8+B/oGkBBhCTJQgABACYz6eOsUw68t+YAp6QPO6eMYFLX4CktBSlMCOVPS8zUlBEPz0nMPqHhOevNlb7551wZ+QQUQ8aDTg8t3tjYo5dMTZLkuC1zUb9YBiGOEfTZI8NWQZU7OQoyLHOnZGKOXUt6skffjMuPA36JHD49/I8mDI30146PwuT3z0cPBJr6Bx5z1Ggamz9vmNDhx8+hL7Iu39M02hAtqPclhUOw8ud3bzpbKPeHAHyyNPcY35NQSPCTMdi29fbZmo6lPgH+bVTdXpDZN1jVokGxB3ltmxN5UXN7azuUpt6cxaAwtxgeyCAMQZiYAD6AcCang5uO4KDDIfa6Qv6yovt6RLyFZyLuxGzmvLHBbLd5basQZWXXPVgg2Kz9E53iZLcTPk5t4vSwyrd/+4X7efSJXLWvAy5zOun+wGVBq50qBecTstdElSia8aduICVG5TsoCZKWjzYkO6WfSGV57d7oSPBoRppLikXQAZZMsCmYLi317iRkiItSkzAEEfLtUkBW7uwPslm6Z2WytfOSGUzB0PQ43ZSotfHu0EwZrNgyBcAz1Qn5XGd/u5XWfOkgKaGBblsaLobKjLTGN9zPPglAAS6uyEYcSD5UKV9oQCx6VSt+DZ5quwFwyjWDOqcsElfLsCw28a2Ox0gt3TgjSkuSLPZwa4wZAankEVmVrcLleoatXpOthQAg4o1w5g4cEEmGzBd3es3OpwK63cnsiVDQdEvIzD/EFznqHgNVV+gk+iZnSk9FBoVq7rhmbCGqS7JL0t8BZLo4mC9FVL5Ik48nCAzu6cXryUloma3UF5IF13T0mT/pDQ0nQaEdm9+tn3VvGy2OBCkIVWH7nON+sWcWdL83Ewpw+2AqTe7oPnXK8Yf+bksPGENQ7oobr6NdRdbtauRjCGnpIDN5wMVAHQAUBITwWG1gu7zQcAM8PJi+ywGfKUQomvCJq1v0VojQDO1mVljpD6O1D4zm0jm/MZS2zSZxApVF/G/w7Amimrb2O9XO9T2WJN3eZFjOgejUELRE5eGZmoTjF7jHAJ3egwPY4DiKbXQPAyjRx1BRhpLTk2SsprajXMnLxi1sSbv4Vy6eqVetbYQtkMIHxkxlrqPAL4A1m/eCzvPNOlNcQFLC/Wq1QtpqwgBlyWQGBCC+Yk2CIgTCGJIfSFs3LafVZ66rDfGBVy9XK9as5jeFEEQiMg0Aw0uzIpPI7XQRKOpucRAUizEgBH5w3ip4kO2c0LAVxbRNhEGwxdmtw8exU++P6+ftSrANDVS4+wACRzkz3ZZ1qwqoE8dDuHwBVhDxUc4OaBfZTfeP0xVx0/zmigWlVuPWcsyU8WJBIdw/TtAjbXtOUR7Tpzhp6MApetfW8tmpolvnBMFmgV4XZFRteYl2srDwPtCeK/6R/mLo6fVGgJAhiAoEgpOG1g/3iq/um4JHbDIJPUG2MVt+3FXXO/w7Q22jPXL+N6ypeItESCSZJQEIukaEpnhMardRQSwyDRyBtGn4qVN+/Gds4365Vi9FGbPBld1paVi5Yv0udC54AYKNDVjwx46epj84UaJAJHJKPUPSmfy3tC2eAfBH603fWojvG+LkluYTwfWLhOvA5pix4h8AhCCCY9Xaj54Aj74qkb9KdZGePTp0WyI05OV5XMyKN9hBRsS0HD4jxrmnMpBv/+Abp1rlM7f8oa74m31R8SNezGJ4rHj7hnvQvpMr2uxVqW41o2nYVzCYln83wf+AyQsJlbR2o/9AAAAAElFTkSuQmCC"
}
inData = {
"method":"sendMessage",
"input":{
"file_inline": file_inline,
"object_guid": chat_id,
"rnd": f"{randint(100000,999999999)}",
"reply_to_message_id": message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}
if caption != None: inData["input"]["text"] = caption
data = {"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps(inData))}
return post(json=data,url=Bot._getURL())
def sendVoice(self, chat_id, file, time, caption=None, message_id=None):
# the file format should be ogg; time is the duration in milliseconds (float)
uresponse = Bot._uploadFile(self, file)
inData = {
"method":"sendMessage",
"input":{
"file_inline": {
"dc_id": uresponse[0]["dc_id"],
"file_id": uresponse[0]["id"],
"type":"Voice",
"file_name": file.split("/")[-1],
"size": str(Path(file).stat().st_size),
"time": time,
"mime": file.split(".")[-1],
"access_hash_rec": uresponse[1],
},
"object_guid":chat_id,
"rnd":f"{randint(100000,999999999)}",
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}
if caption != None: inData["input"]["text"] = caption
data = {
"api_version":"5",
"auth":self.auth,
"data_enc":self.enc.encrypt(dumps(inData))
}
return post(json=data,url=Bot._getURL())
def sendDocument(self, chat_id, file, caption=None, message_id=None):
# Bot.sendDocument("guid","./file.txt", caption="anything", message_id="12345678")
uresponse = Bot._uploadFile(self, file)
file_id = str(uresponse[0]["id"])
mime = file.split(".")[-1]
dc_id = uresponse[0]["dc_id"]
access_hash_rec = uresponse[1]
file_name = file.split("/")[-1]
size = str(Path(file).stat().st_size)
inData = {
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"reply_to_message_id":message_id,
"rnd":f"{randint(100000,999999999)}",
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}
if caption != None: inData["input"]["text"] = caption
data = {
"api_version":"5",
"auth":self.auth,
"data_enc":self.enc.encrypt(dumps(inData))
}
while True:
try:
return loads(self.enc.decrypt(loads(post(json=data,url=Bot._getURL()).text)['data_enc']))
except Exception: # the break after return was unreachable; retry on any failure
continue
def sendLocation(self, chat_id, location, message_id=None):
# location = [float(x), float(y)]
return post(json={
"api_version":"4",
"auth":self.auth,
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"platform":"Android",
"package":"ir.resaneh1.iptv",
"lang_code":"fa"
},
"data_enc":self.enc.encrypt(dumps({
"is_mute": False,
"object_guid":chat_id,
"rnd":f"{randint(100000,999999999)}",
"location":{
"latitude": location[0],
"longitude": location[1]
},
"reply_to_message_id":message_id
})),
"method":"sendMessage"
},url=Bot._getURL())
def searchInChannelMembers(self, text, channel_guid):
try:
return loads(self.enc.decrypt(post(json={
"api_version":"4",
"auth":self.auth,
"client":{
"app_name":"Main",
"app_version":"2.8.1",
"platform":"Android",
"package":"ir.resaneh1.iptv",
"lang_code":"fa"
},
"data_enc":self.enc.encrypt(dumps({
"channel_guid": channel_guid,
"search_text": text
})),
"method":"getChannelAllMembers"
},url=Bot._getURL()).json()["data_enc"]))["in_chat_members"]
except KeyError: return None
def getChatsUpdate(self):
time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getChatsUpdates",
"input":{
"state":time_stamp,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc"))).get("data").get("chats")
def getChatUpdate(self, chat_id):
time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesUpdates",
"input":{
"object_guid":chat_id,
"state":time_stamp
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc"))).get("data").get("updated_messages")
def myStickerSet(self):
time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMyStickerSets",
"input":{},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url=Bot._getURL()).json().get("data_enc"))).get("data")
class Socket:
data = {"error":[],"messages":[]}
def __init__(self, auth):
self.auth = auth
self.enc = rubika.encryption.encryption(auth)
def on_open(self, ws):
def handShake(*args):
ws.send(dumps({
"api_version": "4",
"auth": self.auth,
"data_enc": "",
"method": "handShake"
}))
import _thread
_thread.start_new_thread(handShake, ())
def on_error(self, ws, error):
Socket.data["error"].append(error)
def on_message(self, ws, message):
try:
parsedMessage = loads(message)
Socket.data["messages"].append({"type": parsedMessage["type"], "data": loads(self.enc.decrypt(parsedMessage["data_enc"]))})
except KeyError: pass
def on_close(self, ws, code, msg):
return {"code": code, "message": msg}
def handle(self, OnOpen=None, OnError=None, OnMessage=None, OnClose=None, forEver=True):
import websocket
ws = websocket.WebSocketApp(
"wss://jsocket3.iranlms.ir:80",
on_open=OnOpen or Socket(self.auth).on_open,
on_message=OnMessage or Socket(self.auth).on_message,
on_error=OnError or Socket(self.auth).on_error,
on_close=OnClose or Socket(self.auth).on_close
)
        if forEver: ws.run_forever()
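# Usage sketch (editor's addition; "AUTH_TOKEN" is a placeholder for a real auth
# string). With the default callbacks, handle() blocks and accumulates decrypted
# updates in Socket.data; any callback can be swapped out:
#   listener = Socket("AUTH_TOKEN")
#   listener.handle(OnMessage=lambda ws, message: print(message))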
| 29.349225 | 2,034 | 0.634665 | 24,079 | 0.977788 | 0 | 0 | 211 | 0.008568 | 0 | 0 | 11,272 | 0.457728 |
051f4dab5a5f1bed25333ea9cb6d58c8c48a834b
| 424 |
py
|
Python
|
lpyHardway/logic/ex2.py
|
oreanroy/learn_modules
|
fb1debc612940b65c409d8f5b35a3b4e16e67494
|
[
"MIT"
] | null | null | null |
lpyHardway/logic/ex2.py
|
oreanroy/learn_modules
|
fb1debc612940b65c409d8f5b35a3b4e16e67494
|
[
"MIT"
] | 17 |
2019-12-01T16:56:29.000Z
|
2022-03-02T04:49:51.000Z
|
lpyHardway/logic/ex2.py
|
oreanroy/learn_modules
|
fb1debc612940b65c409d8f5b35a3b4e16e67494
|
[
"MIT"
] | 1 |
2019-09-28T00:43:54.000Z
|
2019-09-28T00:43:54.000Z
|
people = 30
cars = 40
buses = 15
if cars > people:
    print("We should take the cars.")
elif cars < people:
    print("We should not take the cars.")
else:
    print("We can't decide.")
if buses > cars:
    print("That's too many buses.")
elif buses < cars:
    print("Maybe we could take the bus.")
else:
    print("We still can't decide.")
if people > buses:
    print("Alright, let's take the buses.")
else:
    print("Fine, let's stay home then.")
| 17.666667 | 38 | 0.676887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.5 |
0520b1fd12c6c807e99e2585c0ad990c4a9c1185
| 3,001 |
py
|
Python
|
undercrawler/crazy_form_submitter.py
|
abael/ScrapyGenericCrawler
|
9d210fb862a7fddd58c548847d8f5c2d72eae5c1
|
[
"MIT"
] | 88 |
2016-04-07T18:41:19.000Z
|
2022-01-03T12:18:44.000Z
|
undercrawler/crazy_form_submitter.py
|
shekar9160/generic_scraper
|
e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d
|
[
"MIT"
] | 61 |
2016-04-06T18:31:45.000Z
|
2021-07-15T12:10:23.000Z
|
undercrawler/crazy_form_submitter.py
|
shekar9160/generic_scraper
|
e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d
|
[
"MIT"
] | 31 |
2016-04-14T07:49:49.000Z
|
2021-08-08T17:07:36.000Z
|
import logging
import random
import string
from scrapy.http.request.form import _get_inputs as get_form_data
logger = logging.getLogger(__name__)
SEARCH_TERMS = list(string.ascii_lowercase) + list('123456789 *%.?')
def search_form_requests(url, form, meta,
search_terms=None, extra_search_terms=None):
''' yield kwargs for search requests, using default search terms and
extra_search_terms, also randomly refining search if there are such
options in the form.
'''
refinement_options = [False]
if not any(input_type == 'search query'
for input_type in meta['fields'].values()):
return
n_target_inputs = sum(
input_type == 'search query' or
_is_refinement_input(input_type, form.inputs[input_name])
for input_name, input_type in meta['fields'].items())
    assert n_target_inputs >= 1  # the early return above guarantees a search input
# 2 and 4 here are just some values that feel right, need tuning
    refinement_options.extend([True] * 2 * min(2, n_target_inputs))
extra_search_terms = set(extra_search_terms or [])
main_search_terms = set(
search_terms if search_terms is not None else SEARCH_TERMS)
for search_term in (main_search_terms | extra_search_terms):
for do_random_refinement in refinement_options:
formdata = _fill_search_form(
search_term, form, meta, do_random_refinement)
if formdata is not None:
priority = -3 if do_random_refinement else -1
if search_term not in main_search_terms:
min_priority = min(
priority, -int(len(extra_search_terms) / 10))
priority = random.randint(min_priority, priority)
logger.debug(
'Scheduled search: "%s" at %s with priority %d%s',
search_term, url, priority,
' with random refinement' if do_random_refinement else '')
yield dict(
url=url,
formdata=formdata,
method=form.method,
priority=priority,
)
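# Usage sketch (editor's addition, hypothetical spider context): each yielded
# dict maps one-to-one onto scrapy.FormRequest keyword arguments, so a callback
# can schedule the searches directly:
#   for kwargs in search_form_requests(response.url, form, meta):
#       yield scrapy.FormRequest(callback=self.parse_results, **kwargs)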
def _fill_search_form(search_term, form, meta, do_random_refinement=False):
additional_formdata = {}
search_fields = []
for input_name, input_type in meta['fields'].items():
input_el = form.inputs[input_name]
if input_type == 'search query':
search_fields.append(input_name)
elif do_random_refinement and \
_is_refinement_input(input_type, input_el):
if input_el.type == 'checkbox' and random.random() > 0.5:
additional_formdata[input_name] = 'on'
additional_formdata[random.choice(search_fields)] = search_term
return get_form_data(form, additional_formdata, None, None, None)
def _is_refinement_input(input_type, input_el):
return (input_type == 'search category / refinement' and
getattr(input_el, 'type', None) in ['checkbox'])
| 40.554054 | 78 | 0.638121 | 0 | 0 | 1,927 | 0.642119 | 0 | 0 | 0 | 0 | 455 | 0.151616 |
0520c8a0308bb129120ec328a9eacba21da937c0
| 277 |
py
|
Python
|
python/pid.py
|
gin2018/test_tools
|
46d911da6719ae2069ed4e87bdcc6922c21459a5
|
[
"MIT"
] | null | null | null |
python/pid.py
|
gin2018/test_tools
|
46d911da6719ae2069ed4e87bdcc6922c21459a5
|
[
"MIT"
] | null | null | null |
python/pid.py
|
gin2018/test_tools
|
46d911da6719ae2069ed4e87bdcc6922c21459a5
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
pid_file = open("pid.txt", "w")
x = np.linspace(0, 2 * np.pi, 100)
print(x)
pid_file.write(str(x))  # write() needs a string; np.savetxt("pid.txt", x) is an alternative
pid_file.close()
y1, y2 = np.sin(x), np.cos(x)
plt.plot(x, y1, label='y = sin(x)')
plt.plot(x, y2, label='y = cos(x)')
plt.legend()
plt.show()
| 16.294118 | 35 | 0.624549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.129964 |
0524ab92ab97c6f8922dd3dd0c03bf3b79b8a0ee
| 921 |
py
|
Python
|
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | null | null | null |
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | 1 |
2020-01-10T01:06:16.000Z
|
2020-01-10T01:06:16.000Z
|
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | 2 |
2020-01-02T18:22:12.000Z
|
2020-08-05T13:39:21.000Z
|
# -*- coding: utf-8 -*-
import info
class subinfo(info.infoclass):
def setTargets( self ):
self.svnTargets['master'] = 'https://github.com/libssh2/libssh2.git||libssh2-1.8.0'
self.targets['1.8.0'] = "https://www.libssh2.org/download/libssh2-1.8.0.tar.gz"
self.targetInstSrc['1.8.0'] = "libssh2-1.8.0"
self.patchToApply['master'] = ('0001-Ensure-other-libraries-are-told-the-correct-linkage-.patch', 1)
self.defaultTarget = 'master'
def setDependencies( self ):
self.buildDependencies['virtual/base'] = 'default'
self.runtimeDependencies['libs/zlib'] = 'default'
self.runtimeDependencies['libs/openssl'] = 'default'
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__( self, **args ):
CMakePackageBase.__init__( self )
self.subinfo.options.configure.defines = "-DENABLE_ZLIB_COMPRESSION=ON "
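        # Editor's addition (assumption, following the Craft convention of
        # space-terminated define strings): further CMake flags could be
        # appended here, e.g.
        #   self.subinfo.options.configure.defines += "-DBUILD_EXAMPLES=OFF "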
| 38.375 | 108 | 0.667752 | 840 | 0.912052 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.37785 |
05260b29fa65b53dc965a1c89ebcef95a1a96d54
| 396 |
py
|
Python
|
test/config_generator_test.py
|
jnohlgard/projector-installer
|
52aeaa936aa21d9fa6aee109d78e209fa068821b
|
[
"Apache-2.0"
] | null | null | null |
test/config_generator_test.py
|
jnohlgard/projector-installer
|
52aeaa936aa21d9fa6aee109d78e209fa068821b
|
[
"Apache-2.0"
] | null | null | null |
test/config_generator_test.py
|
jnohlgard/projector-installer
|
52aeaa936aa21d9fa6aee109d78e209fa068821b
|
[
"Apache-2.0"
] | null | null | null |
"""Test config_generator.py module"""
from unittest import TestCase
from projector_installer.config_generator import token_quote
class ConfigGeneratorTest(TestCase):
"""Test config_generator.py module"""
def test_token_quote(self) -> None:
"""The token_quote method must return the same token in quotes"""
self.assertEqual(token_quote('some_token'), '\"some_token\"')
| 30.461538 | 73 | 0.739899 | 263 | 0.664141 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.421717 |
05273ebf4b8d4eb6302f146e1b519e163f850d92
| 5,289 |
py
|
Python
|
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | 3 |
2020-11-24T20:30:59.000Z
|
2021-05-26T02:33:53.000Z
|
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | null | null | null |
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | null | null | null |
import os
import re
import logging
import zipfile
logger = logging.getLogger(__name__)
class MvnArtifact:
"""
Class representing a fully defined maven artifact
(e.g., <groupId>:<artifactId>:<type>:<version>[:<dep_type>])
"""
__elem_re = re.compile(r'^(.+?):(.+?):(.+?):(.+?)((:)(.+))?$')
def __init__(self, artifact_str):
elems = MvnArtifact.__elem_re.match(artifact_str).groups()
self.groupId = elems[0]
self.artifactId = elems[1]
self.type = elems[2]
self.version = elems[3]
self.dep_type = elems[6] # (e.g., compile, test, provided)
def __str__(self):
        dt = '' if self.dep_type is None else f':{self.dep_type}'
return f'{self.groupId}:{self.artifactId}:{self.type}:{self.version}{dt}'
def __eq__(self, other):
if isinstance(other, MvnArtifact):
return self.groupId == other.groupId and self.artifactId == other.artifactId \
and self.type == other.type and self.version == other.version
return NotImplemented
    def __hash__(self):
        d = dict(self.__dict__)  # copy, so the instance dict is not mutated
        d.pop('dep_type', None)  # dep_type is excluded, mirroring __eq__
        return hash(tuple(sorted(d.items())))
    def get_class_list(self, m2_home=os.path.expanduser('~/.m2')):
        art_path = self.get_m2_path(m2_home)
        logger.debug("@@-zip file={}".format(art_path))
        if not art_path.endswith('.war') and not art_path.endswith('.jar'):
            logger.warning(f'Unsupported file type: {os.path.splitext(art_path)[1]}')
            return []
        container = zipfile.ZipFile(art_path)  # open only after the type check
        len_prefix = len('WEB-INF/classes/') if art_path.endswith('.war') else 0
        # zip entries always use '/' separators, regardless of platform
        return [i[len_prefix:-6].replace('/', '.') for i in container.namelist() if i.endswith('.class')]
    def get_m2_path(self, m2_home=os.path.expanduser('~/.m2')):
return os.sep.join([m2_home, 'repository',
self.groupId.replace('.', os.sep),
self.artifactId,
self.version,
f"{self.artifactId}-{self.version}.{self.type}"])
class ArtifactTree:
def __init__(self, artifact):
self.artifact = MvnArtifact(artifact)
self.deps = []
def __iter__(self):
yield self
for d in self.deps:
for t in d.__iter__():
yield t
def print_tree(self, indent=0):
print(' ' * indent, self.artifact)
for i in self.deps:
i.print_tree(indent+2)
def filter_deps(self, filter):
self.deps = [i for i in self.deps if filter(i)]
for i in self.deps:
i.filter_deps(filter)
    def missing_m2_pkgs(self, m2_home=os.path.expanduser('~/.m2')):
return [p for p in self if not os.path.exists(p.artifact.get_m2_path(m2_home))]
@staticmethod
def parse_tree_str(tree_str):
return ArtifactTree.__parse_tree([l[7:].rstrip() for l in tree_str.split('\n')], 0)
@staticmethod
def __parse_tree(tree_lst, i):
root_level, root_artifact = ArtifactTree.__parse_item(tree_lst[i])
t = ArtifactTree(root_artifact)
while i+1 < len(tree_lst) and root_level < ArtifactTree.__parse_item(tree_lst[i+1])[0]:
t.deps.append(ArtifactTree.__parse_tree(tree_lst, i+1))
tree_lst.pop(i+1)
return t
@staticmethod
def __parse_item(item):
parts = re.match(r'([ \+\-\|\\]*)(.+)', item).groups()
return int(len(parts[0])/3), parts[1]
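# Input sketch (editor's addition): parse_tree_str expects raw `mvn dependency:tree`
# output, each line carrying the 7-character '[INFO] ' prefix that it strips, e.g.
#   [INFO] com.example:app:jar:1.0
#   [INFO] +- org.slf4j:slf4j-api:jar:1.7.30:compile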
def get_compiled_modules(project_trees_file):
with open(project_trees_file) as f:
try:
str_trees = split_trees([l.rstrip() for l in f.readlines()])
        except Exception:
logger.error(f'File is malformed: {project_trees_file}')
return []
trees = []
for t in str_trees:
t = ArtifactTree.parse_tree_str('\n'.join(t))
if t.artifact.type in ['jar', 'war']:
t.filter_deps(lambda d : d.artifact.dep_type == 'compile' and d.artifact.type in ['jar', 'war'])
trees.append(t)
return [t for t in trees if not t.missing_m2_pkgs()]
def filter_mvn_output(mvn_tree_output):
re_tree_element = re.compile(r'^\[INFO\] (\||\\\-|\+\-| )*([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+?(:[a-zA-Z\-]+)?$')
    with open(mvn_tree_output, 'r') as f:
lines = f.readlines()
tree_lines = [l.rstrip() for l in lines if re_tree_element.match(l)]
return tree_lines
def split_trees(tree_lines):
re_artifact = re.compile(r'^\[INFO\] ([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+$')
trees = []
tree = None
for l in tree_lines:
if re_artifact.match(l):
if tree:
trees.append([tree['root']] + tree['deps'])
tree = {'root': l, 'deps': []}
else:
tree['deps'].append(l)
trees.append([tree['root']] + tree['deps'])
return trees
| 33.687898 | 156 | 0.560219 | 3,650 | 0.690112 | 125 | 0.023634 | 664 | 0.125544 | 0 | 0 | 948 | 0.17924 |
0527ccd6baf873620f163e0b3ed2a44bfa92eff6
| 1,812 |
py
|
Python
|
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
from ..schema.nexusphp import Attendance
from ..schema.site_base import Work, SignState
from ..utils.net_utils import NetUtils
class MainClass(Attendance):
URL = 'https://club.hares.top/'
USER_CLASSES = {
'downloaded': [8796093022208],
'share_ratio': [5.5],
'days': [364]
}
def build_workflow(self, entry, config):
return [
Work(
url='/attendance.php',
method='get',
                succeed_regex=[
                    # Kept in Chinese because they must match the site's responses:
                    # "This is your Nth check-in; you have checked in for N days
                    # in a row and earned N candies this time."
                    '这是您的第 \\d+ 次签到,已连续签到 \\d+ 天,本次签到获得 \\d+ 个奶糖。',
                    '已签到'  # "already checked in"
                ],
check_state=('final', SignState.SUCCEED),
is_base_content=True
)
]
def build_selector(self):
selector = super(MainClass, self).build_selector()
NetUtils.dict_merge(selector, {
'detail_sources': {
'default': {
'do_not_strip': True,
'link': '/userdetails.php?id={}',
'elements': {
'bar': 'ul.list-inline',
'table': 'div.layui-col-md10 > table:nth-child(1) > tbody'
}
}
},
            'details': {
                'points': {
                    # 奶糖 ("candy") is the site's bonus-point currency
                    'regex': '奶糖.*?([\\d,.]+)',
                    'handle': self.handle_points
                },
                'seeding': {
                    'regex': ('(做种中).*?(\\d+)', 2)  # 做种中 = "seeding"
                },
                'leeching': {
                    'regex': ('(下载中).*?\\d+\\D+(\\d+)', 2)  # 下载中 = "downloading"
                },
                'hr': None
}
})
return selector
def handle_points(self, value):
if value in ['.']:
return '0'
else:
return value
| 29.225806 | 82 | 0.400662 | 1,756 | 0.931071 | 0 | 0 | 0 | 0 | 0 | 0 | 498 | 0.264051 |
05299930cfe175dfdd505fa507a88544ad0e95c1
| 716 |
py
|
Python
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 1,500 |
2018-06-11T20:36:24.000Z
|
2022-03-31T08:29:01.000Z
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 2,111 |
2018-06-11T04:10:29.000Z
|
2022-03-26T14:41:32.000Z
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 309 |
2018-07-24T11:18:48.000Z
|
2022-03-30T16:19:48.000Z
|
from garage.envs import GymEnv
from garage.tf.algos.rl2 import RL2Worker
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.policies import DummyPolicy
class TestRL2Worker(TfGraphTestCase):
def test_rl2_worker(self):
env = GymEnv(DummyBoxEnv(obs_dim=(1, )))
policy = DummyPolicy(env_spec=env.spec)
worker = RL2Worker(seed=1,
max_episode_length=100,
worker_number=1,
n_episodes_per_trial=5)
worker.update_agent(policy)
worker.update_env(env)
episodes = worker.rollout()
assert episodes.rewards.shape[0] == 500
| 32.545455 | 50 | 0.655028 | 498 | 0.695531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
052a76693b3fb6c307548d396e0accbc369737c8
| 660 |
py
|
Python
|
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | 1 |
2020-07-26T03:57:45.000Z
|
2020-07-26T03:57:45.000Z
|
#Uche Ogbuji exercises format-number on Brad Marshall's behalf
from Xml.Xslt import test_harness
sheet_1 = """\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match = "/">
<xsl:value-of select='format-number(10000000000.75 + 10000000000.50, "##.##")'/>
</xsl:template>
</xsl:stylesheet>"""
#"
source_1 = "<spam/>"
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
20000000001.25"""
def Test(tester):
source = test_harness.FileInfo(string=source_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1)
return
| 24.444444 | 84 | 0.671212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.586364 |
052bebc9ce249268deadd50cd183873b6f1a799a
| 2,697 |
py
|
Python
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 6 |
2021-10-08T10:20:37.000Z
|
2022-03-30T08:56:10.000Z
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 2 |
2021-11-11T11:44:29.000Z
|
2022-03-08T06:54:54.000Z
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 1 |
2022-03-04T14:43:22.000Z
|
2022-03-04T14:43:22.000Z
|
import typing as t
import pytest as pt
from fastapi_mailman import BadHeaderError, EmailMessage
if t.TYPE_CHECKING:
from fastapi_mailman import Mail
@pt.mark.anyio
async def test_send_message(mail: "Mail"):
mail.backend = "locmem"
msg = EmailMessage(
subject="testing",
to=["[email protected]"],
body="testing",
)
await msg.send()
assert len(mail.outbox) == 1
sent_msg = mail.outbox[0]
assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_message_using_connection(mail: "Mail"):
async with mail.get_connection() as conn:
msg = EmailMessage(
subject="testing",
to=["[email protected]"],
body="testing",
connection=conn,
)
await msg.send()
assert len(mail.outbox) == 1
sent_msg = mail.outbox[0]
assert sent_msg.from_email == mail.default_sender
await conn.send_messages([msg])
assert len(mail.outbox) == 2
@pt.mark.anyio
async def test_send_single(mail: "Mail"):
async with mail.get_connection() as conn:
msg = EmailMessage(
subject="testing",
to=["[email protected]"],
body="testing",
connection=conn,
)
await msg.send()
assert len(mail.outbox) == 1
sent_msg = mail.outbox[0]
assert sent_msg.subject == "testing"
assert sent_msg.to == ["[email protected]"]
assert sent_msg.body == "testing"
assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_many(mail: "Mail"):
async with mail.get_connection() as conn:
msgs = []
for _ in range(10):
msg = EmailMessage(mailman=mail, subject="testing", to=["[email protected]"], body="testing")
msgs.append(msg)
await conn.send_messages(msgs)
assert len(mail.outbox) == 10
sent_msg = mail.outbox[0]
assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_without_sender(mail: "Mail"):
mail.default_sender = None
msg = EmailMessage(mailman=mail, subject="testing", to=["[email protected]"], body="testing")
await msg.send()
assert len(mail.outbox) == 1
sent_msg = mail.outbox[0]
assert sent_msg.from_email is None
@pt.mark.anyio
async def test_send_without_to(mail: "Mail"):
msg = EmailMessage(subject="testing", to=[], body="testing")
assert await msg.send() == 0
@pt.mark.anyio
async def test_bad_header_subject(mail):
msg = EmailMessage(subject="testing\n\r", body="testing", to=["[email protected]"])
with pt.raises(BadHeaderError):
await msg.send()
| 28.389474 | 102 | 0.629218 | 0 | 0 | 0 | 0 | 2,520 | 0.934372 | 2,415 | 0.895439 | 304 | 0.112718 |
052c8a3287a40e2446164e87ba133bbda46f1779
| 294 |
py
|
Python
|
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
sayi1 = int(input("Number 1: "))
sayi2 = int(input("Number 2: "))
sayi3 = int(input("Number 3: "))
if (sayi1 >= sayi2) and (sayi1 >= sayi3):
    enBuyuk = sayi1
elif (sayi2 >= sayi1) and (sayi2 >= sayi3):
    enBuyuk = sayi2
else:
    enBuyuk = sayi3
print("Largest number:", enBuyuk)
| 21 | 38 | 0.608844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.25 |
052d317538142bae7b508c18b4e71450d9b3e276
| 399 |
py
|
Python
|
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
with open('input.txt') as f:
lines = f.readlines()
counter = 0
for line in lines:
right_part = line.split(" | ")[1]
for segment in right_part.strip().split(" "):
#print(segment, len(segment))
        # digits 1, 7, 4 and 8 are the only ones with a unique segment count (2, 3, 4, 7)
        if len(segment) in [2,3,4,7]:
counter += 1
#else:
#print("NO ", segment, len(segment))
print(counter)
| 22.166667 | 53 | 0.491228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.225564 |
052ffb78d4e1a7b366b635d756b5d2bbba48de18
| 7,605 |
py
|
Python
|
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
import tkinter as Tkinter                       # Python 3 name of the Tkinter module
from tkinter import filedialog as tkFileDialog  # Python 3 home of tkFileDialog
from tkinter import ttk
import weighted_objects
import time
import numpy
import sys
while True:
# Ask user for file dialog.
Tkinter.Tk().withdraw()
osu_file_path = tkFileDialog.askopenfilename(title="Select an osu file", filetypes=(("osu files", "*.osu"),))
# Calculate final nerf.
final_nerf = weighted_objects.calculate_nerf(osu_file_path)
distance_snap_list = weighted_objects.weighted_distance_snap_list
time_list = weighted_objects.time_list
# Separate list into multiple lists when breaks exist.
time_break_separated_list = [[]]
list_number = 0
for i in range(len(time_list) - 1):
if time_list[i + 1] - time_list[i] > 3000:
# Create new list.
list_number += 1
time_break_separated_list.append([])
time_break_separated_list[list_number].append(time_list[i])
# Coordinates to be later used in the canvas.
canvas_distance_snap_list = []
canvas_time_list = []
# Calculating coordinates.
for i in time_list:
canvas_time_list.append(350 * (i - time_list[0]) / (time_list[-1] - time_list[0]))
for i in distance_snap_list:
canvas_distance_snap_list.append(150 - i * 75)
# Creating the GUI.
root = Tkinter.Tk()
root.resizable(width=False, height=False)
root.geometry("400x500")
root.title("Weighted Objects")
# Stuff for the timer.
ms = time_list[0]
note_number = 0
# Function to be used to initialize the timer.
def first_load():
# Variable relative_time is the time when the user has clicked the button to start timer.
global relative_time
relative_time = int(round(time.time() * 1000)) - time_list[0]
tick()
# Function to be used to run the timer.
def tick():
# Variable ms is the time that constantly goes up during the timer.
global ms
time_label.after(30, tick)
ms = int(round(time.time() * 1000)) - relative_time
time_label["text"] = "Timer: {}ms".format(ms)
update_labels()
draw_timer_line()
# Function to be used to update the labels that need constant updates.
def update_labels():
global note_number
# Updates note number depending on where the timer is at.
for i in range(len(time_list)):
if ms < time_list[i]:
note_number = i - 1
break
distance_snap_label["text"] = "Weighted: {:.2f}x".format(distance_snap_list[note_number])
progress_bar["value"] = distance_snap_list[note_number]
cumulative_label["text"] = "Cumulative Value: {}".format(numpy.cumsum(distance_snap_list)[note_number])
# Function to be used to draw the green line that indicates where the timer is at.
def draw_timer_line():
if ms < time_list[-1]:
draw_x = 350 * (ms - time_list[0]) / (time_list[-1] - time_list[0])
difficulty_graph.coords(timer_line, draw_x, 0, draw_x, 150)
# Function used to kill the GUI.
def stop():
root.quit()
root.destroy()
# Function used to kill the program entirely.
def kill():
sys.exit()
Tkinter.Label(root, fg="black",
text="Old Amount of Objects: {}".format(len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="New Calculated Weighted Objects: {:.2f}".format(sum(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Raw Percentage Change: {:.2f}%".format(100 * sum(distance_snap_list)
/ len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Calculated Nerf/Buff: {:.2f}%".format(100 * final_nerf)).pack()
Tkinter.Label(root, fg="blue", text="Graph of Distance Snap/Cumulative Sum of Distance Snap against Time").pack()
difficulty_graph = Tkinter.Canvas(root, width=350, height=150)
difficulty_graph.pack()
Tkinter.Label(root, fg="black", text="Red/Blue: Distance Snap").pack()
Tkinter.Label(root, fg="black", text="Yellow: Cumulative Sum of Distance Snap").pack()
# Draw grid lines and fill background
difficulty_graph.create_rectangle(0, 0, 350, 150, fill="#dddddd")
difficulty_graph.create_line(0, 30, 350, 30, fill="#cccccc")
difficulty_graph.create_line(0, 60, 350, 60, fill="#cccccc")
difficulty_graph.create_line(0, 90, 350, 90, fill="#cccccc")
difficulty_graph.create_line(0, 120, 350, 120, fill="#cccccc")
difficulty_graph.create_line(70, 0, 70, 150, fill="#cccccc")
difficulty_graph.create_line(140, 0, 140, 150, fill="#cccccc")
difficulty_graph.create_line(210, 0, 210, 150, fill="#cccccc")
difficulty_graph.create_line(280, 0, 280, 150, fill="#cccccc")
# Draw blue line graph, distance snap.
for i in range(len(distance_snap_list) - 1):
# Don't continue the graph if there is a break.
if time_list[i + 1] - time_list[i] < 3000:
difficulty_graph.create_line(canvas_time_list[i], canvas_distance_snap_list[i],
canvas_time_list[i + 1], canvas_distance_snap_list[i + 1],
fill="#9999ff")
    # Draw red line graph: a 20-note moving average of the distance snap.
for n in range(len(time_break_separated_list)):
for x in range(len(time_break_separated_list[n]) - 20):
if n == 0:
i = x
else:
                i = x + numpy.cumsum([len(t) for t in time_break_separated_list])[n - 1]  # list, not map: np.cumsum needs a sequence
# Don't continue graph if there's a break.
if time_list[i + 11] - time_list[i + 10] < 3000:
difficulty_graph.create_line(canvas_time_list[i + 10],
sum(canvas_distance_snap_list[i:i + 20]) / 20.0,
canvas_time_list[i + 11],
sum(canvas_distance_snap_list[i + 1:i + 21]) / 20.0,
fill="#990000")
# Draw yellow line graph, cumulative distance snap sum.
for i in range(len(distance_snap_list) - 1):
difficulty_graph.create_line(canvas_time_list[i],
150 - (149 * numpy.cumsum(distance_snap_list)[i] / sum(distance_snap_list)),
canvas_time_list[i + 1],
150 - (149 * numpy.cumsum(distance_snap_list)[i + 1] / sum(distance_snap_list)),
fill="#ffff00")
timer_line = difficulty_graph.create_line(0, 0, 0, 150, fill="#77ff77")
time_label = Tkinter.Label(root, fg="black")
time_label.pack()
distance_snap_label = Tkinter.Label(root, fg="black")
distance_snap_label.pack()
cumulative_label = Tkinter.Label(root, fg="black")
cumulative_label.pack()
progress_bar = ttk.Progressbar(root, orient="horizontal", length=200, mode="determinate")
progress_bar.pack()
progress_bar["maximum"] = 2
Tkinter.Button(root, fg="blue", text="Start Realtime!", command=first_load).pack()
Tkinter.Button(root, fg="red", text="Choose another map", command=stop).pack()
# If window is closed, stop the program.
root.protocol("WM_DELETE_WINDOW", kill)
root.mainloop()
| 39.201031 | 118 | 0.598028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,807 | 0.237607 |
05311a2863ffbf10e5b4872464958a44b018f474
| 2,929 |
py
|
Python
|
src/benchmark/probe_training_wrapper.py
|
dumpmemory/PEARL_v1
|
df46be5ed86ba7850486b578a8926aa151e7bf87
|
[
"MIT"
] | 24 |
2021-06-10T04:09:00.000Z
|
2021-11-02T11:23:35.000Z
|
src/benchmark/probe_training_wrapper.py
|
dumpmemory/PEARL_v1
|
df46be5ed86ba7850486b578a8926aa151e7bf87
|
[
"MIT"
] | 1 |
2021-06-08T15:27:38.000Z
|
2021-06-08T15:41:05.000Z
|
src/benchmark/probe_training_wrapper.py
|
dumpmemory/PEARL_v1
|
df46be5ed86ba7850486b578a8926aa151e7bf87
|
[
"MIT"
] | 4 |
2021-06-10T02:28:12.000Z
|
2021-08-24T13:00:14.000Z
|
from .probe import ProbeTrainer
# train using embeddings
def train_embeddings(encoder, probe_type, num_epochs, lr, patience, wandb, save_dir, batch_size,
tr_episodes, val_episodes, tr_labels, val_labels, test_episodes, test_labels, use_encoder=False, save_interval=100):
if use_encoder:
enc = encoder
else:
enc = None
probe_trainer = ProbeTrainer(encoder=enc,
epochs=num_epochs,
lr=lr,
batch_size=batch_size,
patience=patience,
wandb=wandb,
fully_supervised=False,
save_dir=save_dir,
representation_len=encoder.feature_size)
probe_trainer.train(tr_episodes, val_episodes,
tr_labels, val_labels, save_interval=save_interval)
final_accuracies, final_f1_scores = probe_trainer.test(test_episodes, test_labels)
wandb.log(final_accuracies)
wandb.log(final_f1_scores)
# train using images
def train_images(encoder, probe_type, num_epochs, lr, patience, wandb, save_dir, batch_size,
tr_episodes, val_episodes, tr_labels, val_labels,
test_episodes, test_labels, save_interval=100):
probe_trainer = ProbeTrainer(encoder=encoder,
epochs=num_epochs,
lr=lr,
batch_size=batch_size,
patience=patience,
wandb=wandb,
fully_supervised=False,
save_dir=save_dir,
representation_len=encoder.feature_size)
probe_trainer.train(tr_episodes, val_episodes,
tr_labels, val_labels, save_interval=save_interval)
final_accuracies, final_f1_scores = probe_trainer.test(test_episodes, test_labels)
wandb.log(final_accuracies)
wandb.log(final_f1_scores)
# main training method
def run_probe_training(training_input, encoder, probe_type, num_epochs, lr, patience, wandb, save_dir, batch_size,
tr_episodes, val_episodes, tr_labels, val_labels, test_episodes, test_labels, use_encoder=False, save_interval=100):
if training_input == 'embeddings':
train_embeddings(encoder, probe_type, num_epochs, lr, patience, wandb, save_dir, batch_size,
tr_episodes, val_episodes, tr_labels, val_labels, test_episodes, test_labels, use_encoder=use_encoder, save_interval=save_interval)
elif training_input == 'images':
train_images(encoder, probe_type, num_epochs, lr, patience, wandb, save_dir, batch_size,
tr_episodes, val_episodes, tr_labels, val_labels,
test_episodes, test_labels, save_interval=save_interval)
else:
print("Invalid input...choose either 'embeddings' and 'images'")
| 46.492063 | 148 | 0.635712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.048822 |
0531675b4efb814c0c0505cc13c93cd557315404
| 1,310 |
py
|
Python
|
grr/server/grr_response_server/blob_stores/db_blob_store.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/blob_stores/db_blob_store.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/blob_stores/db_blob_store.py
|
oueldz4/grr
|
8c60d9198cc0875a8ea80b90237eb0a8272082ff
|
[
"Apache-2.0"
] | 1 |
2020-07-09T01:08:48.000Z
|
2020-07-09T01:08:48.000Z
|
#!/usr/bin/env python
"""REL_DB blobstore implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_server import blob_store
from grr_response_server import data_store
class DbBlobStore(blob_store.BlobStore):
"""A REL_DB-based blob store implementation."""
# TODO(user): REL_DB can be None, because initialization is happening at some
# early but nondeterministic time. Once REL_DB is guaranteed to be not None,
  # perform type checking that REL_DB.delegate is a BlobStore.
@property
def delegate(self):
return data_store.REL_DB.delegate
def WriteBlobs(self, blob_id_data_map):
return self.delegate.WriteBlobs(blob_id_data_map)
def ReadBlobs(self, blob_ids):
return self.delegate.ReadBlobs(blob_ids)
def ReadBlob(self, blob_id):
return self.delegate.ReadBlob(blob_id)
def CheckBlobsExist(self, blob_ids):
return self.delegate.CheckBlobsExist(blob_ids)
def CheckBlobExists(self, blob_id):
return self.delegate.CheckBlobExists(blob_id)
def WriteBlobsWithUnknownHashes(self, blobs_data):
return self.delegate.WriteBlobsWithUnknownHashes(blobs_data)
def WriteBlobWithUnknownHash(self, blob_data):
return self.delegate.WriteBlobWithUnknownHash(blob_data)
| 31.190476 | 79 | 0.79084 | 1,047 | 0.799237 | 0 | 0 | 69 | 0.052672 | 0 | 0 | 320 | 0.244275 |
053268be449fba403f273951c902bae23a8253b1
| 333 |
py
|
Python
|
tests/ut_repytests_loose-testnetmessportreuse.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 1 |
2021-08-18T05:58:17.000Z
|
2021-08-18T05:58:17.000Z
|
tests/ut_repytests_loose-testnetmessportreuse.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 3 |
2015-11-17T21:01:03.000Z
|
2016-07-14T09:08:04.000Z
|
tests/ut_repytests_loose-testnetmessportreuse.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 5 |
2015-07-02T13:29:23.000Z
|
2021-09-25T07:48:30.000Z
|
#pragma out
#pragma repy restrictions.loose
def foo(ip,port,mess, ch):
print ip,port,mess,ch
stopcomm(ch)
def noop(a,b,c,d):
pass
if callfunc == 'initialize':
ip = getmyip()
noopch = recvmess(ip,<messport>,noop)
recvmess(ip,<messport1>,foo)
sleep(.1)
sendmess(ip,<messport1>,'hi',ip,<messport>)
stopcomm(noopch)
| 18.5 | 45 | 0.678679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.174174 |
05343aca0c5c82c59e3358b3b9d65dce1ef6b0de
| 806 |
py
|
Python
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 9 |
2018-07-08T20:01:33.000Z
|
2022-03-29T11:31:51.000Z
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 1 |
2019-07-10T12:16:53.000Z
|
2019-07-10T12:16:53.000Z
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 5 |
2018-06-04T02:33:43.000Z
|
2020-05-25T22:48:58.000Z
|
"""
Startup checks
"""
import subprocess
import pyzfscmds.system.agnostic as zfssys
def is_root_on_zfs():
"""Check if running root on ZFS"""
system = zfssys.check_valid_system()
if system is None:
raise RuntimeError(f"System is not yet supported by pyzfscmds\n")
root_dataset = None
if zfssys.zfs_module_loaded() and zpool_exists():
root_dataset = zfssys.mountpoint_dataset("/")
if root_dataset is None:
raise RuntimeError("System is not booting off a ZFS root dataset\n")
return True
def zpool_exists() -> bool:
try:
subprocess.check_call(["zpool", "get", "-H", "version"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return False
return True
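# Usage sketch (editor's addition):
#   import pyzfscmds.check
#   if pyzfscmds.check.is_root_on_zfs():
#       print("running on a ZFS root dataset")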
| 23.705882 | 77 | 0.66005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.219603 |
0536d3d2cb26fae2a4bb43f1a3c0258c006ca24c
| 2,015 |
py
|
Python
|
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | 4 |
2020-09-03T03:39:53.000Z
|
2021-08-25T03:53:41.000Z
|
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | null | null | null |
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | 1 |
2020-09-04T07:04:50.000Z
|
2020-09-04T07:04:50.000Z
|
import sys
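# distinct-1/2/3 below report the ratio of unique unigrams/bigrams/trigrams to
# the total n-gram count of a generated corpus, a standard diversity metric for
# dialogue generation.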
def distinct_1(path):
inFile = open(path, mode="r", encoding="utf8")
char_set = set()
all_unigram_count = 0
for line in inFile.readlines():
line = line.strip().split(" ")
for word in line:
char_set.add(word)
all_unigram_count += len(line)
distinct_unigram_count = len(char_set)
print("distinct_unigram: ", distinct_unigram_count)
print("all_unigram: ", all_unigram_count)
print("distinct 1: " + str(distinct_unigram_count / all_unigram_count))
inFile.close()
return distinct_unigram_count / all_unigram_count
sp="#####"
def distinct_2(path):
inFile = open(path, mode="r", encoding="utf8")
bichar_set = set()
all_bigram_count = 0
for line in inFile.readlines():
line = line.strip().split(" ")
char_len = len(line)
for idx in range(char_len - 1):
bichar_set.add(line[idx] + sp + line[idx + 1])
all_bigram_count += (char_len - 1)
distinct_bigram_count = len(bichar_set)
print("distinct_bigram: ", distinct_bigram_count)
print("all_bigram: ", all_bigram_count)
print("distinct 2: " + str(distinct_bigram_count / all_bigram_count))
inFile.close()
return distinct_bigram_count / all_bigram_count
def distinct_3(path):
inFile = open(path, mode="r", encoding="utf8")
bichar_set = set()
all_bigram_count = 0
for line in inFile.readlines():
line = line.strip().split(" ")
char_len = len(line)
for idx in range(char_len - 2):
bichar_set.add(line[idx] + sp + line[idx + 1] + sp + line[idx + 2])
all_bigram_count += (char_len -2)
distinct_bigram_count = len(bichar_set)
print("distinct_trigram: ", distinct_bigram_count)
print("all_trigram: ", all_bigram_count)
print("distinct 3: " + str(distinct_bigram_count / all_bigram_count))
inFile.close()
return distinct_bigram_count / all_bigram_count
distinct_1(sys.argv[1])
distinct_2(sys.argv[1])
distinct_3(sys.argv[1])
| 34.152542 | 79 | 0.655583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.0933 |
0537e1ab85799850e99a5e3c6bb0f22f481e1ab8
| 5,036 |
py
|
Python
|
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | 4 |
2019-11-23T19:44:21.000Z
|
2020-02-20T16:54:45.000Z
|
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | null | null | null |
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | 2 |
2019-06-21T19:27:55.000Z
|
2021-02-12T19:13:22.000Z
|
"""
Calculate PDFs for polar vortex response
Notes
-----
Author : Zachary Labe
Date : 25 June 2019
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import datetime
import read_MonthlyData as MO
import calc_Utilities as UT
import cmocean
import scipy.stats as sts
### Define directories
directorydata = '/seley/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/STRATOVARI/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting PDF Polar Vortex Subsamples- %s----' % titletime)
### Alott time series (300 ensemble members)
year1 = 1701
year2 = 2000
years = np.arange(year1,year2+1,1)
###############################################################################
###############################################################################
###############################################################################
### Call arguments
varnames = ['U10']
period = 'JFM' # Enter temporal period (DJF,JFM,JFMA,ND)
simuh = 'Past' # Enter simulation time (Current,Past)
letters = [r'Mean',r'A',r'B',r'C']
###############################################################################
if simuh == 'Current':
simuq = 'Cu'
elif simuh == 'Past':
simuq = 'Pi'
else:
print(ValueError('Wrong simulation selected!'))
###############################################################################
###############################################################################
###############################################################################
### Call function for 4d variable data
lat,lon,lev,varfuture = MO.readExperiAll(varnames[0],'Future','surface')
lat,lon,lev,varpast = MO.readExperiAll(varnames[0],simuh,'surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### List of experiments
runs = [varfuture,varpast]
### Separate per monthly periods
if period == 'DJF':
varmo = np.empty((len(runs),varpast.shape[0]-1,varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = UT.calcDecJanFeb(runs[i],runs[i],lat,
lon,'surface',17)
elif period == 'JFM':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,:3,:,:],axis=1)
elif period == 'JFMA':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,:4,:,:],axis=1)
elif period == 'ND':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,-2:,:,:],axis=1)
else:
ValueError('Wrong period selected! (DJF,JFM,JFMA,ND)')
### Remove missing data
varmo[np.where(varmo < -1e10)] = np.nan
###############################################################################
###############################################################################
###############################################################################
### Slice data for 60N
latq = np.where((lat >= 59.5) & (lat <= 60.5))[0]
latu = lat[latq].squeeze()
varmou = varmo[:,:,latq,:].squeeze()
### Calculate zonal mean
varmoz = np.nanmean(varmou[:,:,:],axis=2)
### Calculate anomalies
anom = varmoz[0,:] - varmoz[1,:]
### Remove nans
mask = ~np.isnan(anom)
anom = anom[mask]
### Fit a distribution
num_bins = np.arange(-50,50,1)
mA,sA = sts.norm.fit(anom[:100])
mB,sB = sts.norm.fit(anom[100:200])
mC,sC = sts.norm.fit(anom[200:])
mm,sm = sts.norm.fit(anom[:])
A = sts.norm.pdf(num_bins,mA,sA)
B = sts.norm.pdf(num_bins,mB,sB)
C = sts.norm.pdf(num_bins,mC,sC)
meann = sts.norm.pdf(num_bins,mm,sm)
plt.figure()
plt.plot(num_bins,A,color='darkblue',linewidth=2.0,label=r'A')
plt.plot(num_bins,B,color='darkgreen',linewidth=2.0,label=r'B')
plt.plot(num_bins,C,color='darkorange',linewidth=2.0,label=r'C')
plt.plot(num_bins,meann,color='k',linewidth=2.0,label=r'Mean',
linestyle='--',dashes=(1,0.3))
l = plt.legend(shadow=False,fontsize=7,loc='upper left',
fancybox=True,frameon=False,ncol=1,bbox_to_anchor=(0.72,1),
labelspacing=0.2,columnspacing=1,handletextpad=0.4)
for text in l.get_texts():
text.set_color('k')
### Statistical tests on distribution
tA,pA = sts.ks_2samp(A,meann)
tB,pB = sts.ks_2samp(B,meann)
tC,pC = sts.ks_2samp(C,meann)
print('\n\nP-value between A and mean --> %s!' % np.round(pA,4))
print('P-value between B and mean --> %s!' % np.round(pB,4))
print('P-value between C and mean --> %s!' % np.round(pC,4))
plt.savefig(directoryfigure + 'PDFs_PolarVortex_%s_%s.png' % \
(period,simuh),dpi=300)
| 34.027027 | 79 | 0.53475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,931 | 0.383439 |
053869e3d79166cc0d895c117eef19a63bd977af
| 906 |
py
|
Python
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 23 |
2019-09-30T15:22:58.000Z
|
2021-04-09T10:53:23.000Z
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 1 |
2019-11-24T18:37:56.000Z
|
2019-11-24T18:37:56.000Z
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 4 |
2020-01-14T03:31:34.000Z
|
2021-05-07T21:34:22.000Z
|
import pytest
from airtunnel.operators.sql import sql_helpers
TEST_SCRIPT = "ddl/test_schema/test_table.sql"
@pytest.mark.parametrize(
argnames=("sql_path",),
argvalues=((TEST_SCRIPT,), ("/" + TEST_SCRIPT,), ((TEST_SCRIPT,),)),
)
def test_load_sql_script(sql_path: str):
# load with a single relative path
s = sql_helpers.load_sql_script(sql_path)
assert len(s) > 50
def test_split_sql_script():
sql_helpers.split_sql_script(sql_helpers.load_sql_script(TEST_SCRIPT))
def test_format_sql_script():
sql_helpers.format_sql_script(
sql_script=sql_helpers.load_sql_script(TEST_SCRIPT),
sql_params_dict={"idx_name": "i1", "idx_col": "c1"},
)
def test_prepare_sql_params(fake_airflow_context):
sql_helpers.prepare_sql_params(
compute_sql_params_function=lambda f: {"x": f["task_instance"]},
airflow_context=fake_airflow_context,
)
| 26.647059 | 74 | 0.728477 | 0 | 0 | 0 | 0 | 277 | 0.30574 | 0 | 0 | 124 | 0.136865 |
053914ae8ca6bed144522d26cba1f2a52c6014f5
| 2,582 |
py
|
Python
|
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
## softmax: learning rate 0.1, 600 iterations
## perceptron: learning rate 0.05, 550 iterations
def readData(csvname):
data = np.loadtxt(csvname, delimiter=',')
x = data[:-1, :]
y = data[-1:, :]
return x, y
def softmaxCostFunc(x, y, w):
cost = np.sum(np.log(1 + np.exp(-y*np.transpose(np.dot(np.transpose(x), w)))))
return cost / float(np.size(y))
def gradientDescentOneStepForSoftmax(x, y, w, alpha=0.1):
total = np.zeros([9,1])
for i in range(np.size(y)):
power = np.exp(-y[:,i] * np.dot(x[:,i], w))
term = power / (1 + power)
total += term * y[:,i] * x[:,[i]]
w = w + alpha * (1/np.size(y)) * total
return w
def perceptronCostFunc(x, y, w):
cost = 0
a = (-y*np.transpose(np.dot(np.transpose(x), w)))[0]
for i in range(len(a)):
cost += a[i] if (a[i] > 0) else 0
return cost / float(np.size(y))
def gradientDescentOneStepForPerceptron(x, y, w, alpha=0.05):
total = np.zeros([9,1])
for i in range(np.size(y)):
term = -y[:,i] * np.dot(x[:,[i]].T, w)
total += 0 if term <= 0 else -y[:,i] * x[:,[i]]
w = w - alpha * (1/np.size(y)) * total
return w
if __name__ == "__main__":
csvname = 'breast_cancer_data.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1, 1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
xSoftList = [0]
ySoftList = [softmaxCostFunc(x, y, w)]
for i in range(600):
w = gradientDescentOneStepForSoftmax(x, y, w)
xSoftList.append(i+1)
ySoftList.append(softmaxCostFunc(x, y, w))
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Softmax Wrong Prediction: ", wrong)
w = np.ones([x.shape[0], 1])
xPerceptronList = [0]
yPerceptronList = [perceptronCostFunc(x, y, w)]
for i in range(550):
w = gradientDescentOneStepForPerceptron(x, y, w)
xPerceptronList.append(i+1)
yPerceptronList.append(perceptronCostFunc(x, y, w))
plt.plot(xSoftList, ySoftList, label="Softmax Cost Function",color="#F08080")
    plt.plot(xPerceptronList, yPerceptronList, label="Perceptron Cost Function")
plt.legend(loc="upper right")
plt.show()
plt.close()
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Perceptron Wrong Prediction: ", wrong)
| 29.011236 | 82 | 0.573199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.080558 |
05399638e32621d9f8eab1ecc185a769af934b80
| 416 |
py
|
Python
|
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
height = float(input("Enter height of the square: "))
width = float(input("Enter width of the Square: "))
perimeter = (2 * height) + (2 * width)
area = height * width
print("The perimeter of the square is", perimeter)
print("The area of the square is", area)
close = input("Press X to exit")
# The above line of code keeps the program open for the user to see the outcome of the problem.
| 23.111111 | 96 | 0.663462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.555288 |
053b161da791d51b0f7c77d904ccb2a6a0472da3
| 6,492 |
py
|
Python
|
dls7-1[cnn].py
|
cutz-j/computer_vision
|
23408231221bb16539ea1964f000bdbb7f9e7e20
|
[
"MIT"
] | null | null | null |
dls7-1[cnn].py
|
cutz-j/computer_vision
|
23408231221bb16539ea1964f000bdbb7f9e7e20
|
[
"MIT"
] | null | null | null |
dls7-1[cnn].py
|
cutz-j/computer_vision
|
23408231221bb16539ea1964f000bdbb7f9e7e20
|
[
"MIT"
] | null | null | null |
import numpy as np
from common.util import im2col
from collections import OrderedDict
from common.layers import *
from common.gradient import numerical_gradient
## 4-D arrays ##
x = np.random.rand(10, 1, 28, 28) # (m, c, h, w)
print(x.shape)
x[0].shape  # the first sample
x[0, 0]  # channel plane of the first sample
x1 = np.random.rand(1, 3, 7, 7)
col1 = im2col(x1, 5, 5, stride=1, pad=0)
print(col1.shape)
x2 = np.random.rand(10, 3, 7 ,7)
col2 = im2col(x2, 5, 5)
print(col2.shape)
class Convolution:
    def __init__(self, W, b, stride=1, pad=0):
        # initialization #
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad
    def forward(self, x):
        # conv forward #
        # expand the input with im2col so the convolution becomes one np.dot #
        FN, C, FH, FW = self.W.shape  # filter shape
        N, C, H, W = x.shape  # input shape
        out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
        out_w = int(1 + (W + 2*self.pad - FW) / self.stride)  # output-size formula
        col = im2col(x, FH, FW, self.stride, self.pad)  # flatten patches to 2-D
        col_W = self.W.reshape(FN, -1).T  # reshape filters to 2-D for the matrix product
        out = np.dot(col, col_W) + self.b  # single matrix product
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
        return out
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + (H - self.pool_h) / self.stride)
out_w = int(1 + (W - self.pool_w) / self.stride)
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h * self.pool_w)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
return out
class SimpleConvNet:
"""단순한 합성곱 신경망
conv - relu - pool - affine - relu - affine - softmax
Parameters
----------
input_size : 입력 크기(MNIST의 경우엔 784)
hidden_size_list : 각 은닉층의 뉴런 수를 담은 리스트(e.g. [100, 100, 100])
output_size : 출력 크기(MNIST의 경우엔 10)
activation : 활성화 함수 - 'relu' 혹은 'sigmoid'
weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)
'relu'나 'he'로 지정하면 'He 초깃값'으로 설정
'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정
"""
def __init__(self, input_dim=(1, 28, 28),
conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
hidden_size=100, output_size=10, weight_init_std=0.01):
filter_num = conv_param['filter_num']
filter_size = conv_param['filter_size']
filter_pad = conv_param['pad']
filter_stride = conv_param['stride']
input_size = input_dim[1]
conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
        # weight initialization
self.params = {}
self.params['W1'] = weight_init_std * \
np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
self.params['b1'] = np.zeros(filter_num)
self.params['W2'] = weight_init_std * \
np.random.randn(pool_output_size, hidden_size)
self.params['b2'] = np.zeros(hidden_size)
self.params['W3'] = weight_init_std * \
np.random.randn(hidden_size, output_size)
self.params['b3'] = np.zeros(output_size)
        # build the layers
self.layers = OrderedDict()
self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
conv_param['stride'], conv_param['pad'])
self.layers['Relu1'] = Relu()
self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
self.layers['Relu2'] = Relu()
self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
self.last_layer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
"""손실 함수를 구한다.
Parameters
----------
x : 입력 데이터
t : 정답 레이블
"""
y = self.predict(x)
return self.last_layer.forward(y, t)
def accuracy(self, x, t, batch_size=100):
if t.ndim != 1 : t = np.argmax(t, axis=1)
acc = 0.0
for i in range(int(x.shape[0] / batch_size)):
tx = x[i*batch_size:(i+1)*batch_size]
tt = t[i*batch_size:(i+1)*batch_size]
y = self.predict(tx)
y = np.argmax(y, axis=1)
acc += np.sum(y == tt)
return acc / x.shape[0]
def numerical_gradient(self, x, t):
"""기울기를 구한다(수치미분).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
"""
loss_w = lambda w: self.loss(x, t)
grads = {}
for idx in (1, 2, 3):
grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
return grads
def gradient(self, x, t):
"""기울기를 구한다(오차역전파법).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
"""
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.last_layer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
        # store the gradients
grads = {}
grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
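# Usage sketch (editor's addition; assumes MNIST-style batches of shape
# (N, 1, 28, 28) with matching labels):
#   network = SimpleConvNet()
#   grads = network.gradient(x_batch, t_batch)
#   for key in network.params:
#       network.params[key] -= 0.01 * grads[key]  # plain SGD step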
| 31.211538 | 95 | 0.518792 | 3,852 | 0.548562 | 0 | 0 | 0 | 0 | 0 | 0 | 2,127 | 0.302905 |
053b82076a707b4cf23d2d9a676fce87856d471c
| 17,136 |
py
|
Python
|
tests/test_crawler.py
|
jacopoabbate/datavault-api-python-client
|
70c3113b56db77de3835b4210dd7bffb22b34c9f
|
[
"MIT"
] | null | null | null |
tests/test_crawler.py
|
jacopoabbate/datavault-api-python-client
|
70c3113b56db77de3835b4210dd7bffb22b34c9f
|
[
"MIT"
] | null | null | null |
tests/test_crawler.py
|
jacopoabbate/datavault-api-python-client
|
70c3113b56db77de3835b4210dd7bffb22b34c9f
|
[
"MIT"
] | null | null | null |
import datetime
import pytest
import requests
from datavault_api_client import crawler
from datavault_api_client.data_structures import DiscoveredFileInfo
class TestCleanRawFilename:
@pytest.mark.parametrize(
"datavault_assigned_name, expected_cleaned_filename", [
("WATCHLIST_username_676_20200610.txt.bz2", "WATCHLIST_676_20200610.txt.bz2"),
("CROSSREF_903_20200610.txt.bz2", "CROSSREF_903_20200610.txt.bz2"),
("COREREF_945_20200610.txt.bz2", "COREREF_945_20200610.txt.bz2"),
("REPLAY_794_20200316.txt.bz2", "REPLAY_794_20200316.txt.bz2"),
("SEDOL_794_20200316.txt.bz2", "SEDOL_794_20200316.txt.bz2"),
("CUSIP_794_20200316.txt.bz2", "CUSIP_794_20200316.txt.bz2"),
("PREMREF_794_20200316.txt.bz2", "PREMREF_794_20200316.txt.bz2"),
],
)
def test_name_cleaning(self, datavault_assigned_name, expected_cleaned_filename):
# Setup - none
# Exercise
cleaned_filename = crawler.clean_raw_filename(datavault_assigned_name)
# Verify
assert cleaned_filename == expected_cleaned_filename
# Cleanup - none
class TestParseSourceFromName:
def test_source_id_parser(self):
# Setup
filename_to_parse = "WATCHLIST_945_20201201.txt.bz2"
# Exercise
parsed_source_id = crawler.parse_source_from_name(filename_to_parse)
# Verify
expected_source_id = "945"
assert parsed_source_id == expected_source_id
# Cleanup - none
class TestParseReferenceDate:
def test_reference_date_parser(self):
# Setup
filename_to_parse = "WATCHLIST_945_20201201.txt.bz2"
# Exercise
parsed_reference_date = crawler.parse_reference_date(filename_to_parse)
# Verify
expected_reference_date = datetime.datetime(year=2020, month=12, day=1)
assert parsed_reference_date == expected_reference_date
# Cleanup - none
class TestCreateDiscoveredFileObject:
def test_discovered_file_object_creation(self):
# Setup
file_node = {
'name': 'WATCHLIST_accountname_945_20201130.txt.bz2',
'fid': '20201130-S945_WATCHLIST_accountname_0_0',
'parent': '/v2/list/2020/11/30/S945/WATCHLIST',
'url': '/v2/data/2020/11/30/S945/WATCHLIST/20201130-S945_WATCHLIST_accountname_0_0',
'size': 78994869,
'md5sum': 'bf703f867cad0b414d84fac0c9bfe0e5',
'createdAt': '2020-11-30T23:22:36',
'updatedAt': '2020-11-30T23:22:36',
'writable': False,
'directory': False
}
# Exercise
created_discovered_file_object = crawler.create_discovered_file_object(file_node)
# Verify
expected_discovered_file_object = DiscoveredFileInfo(
file_name='WATCHLIST_945_20201130.txt.bz2',
download_url=(
"https://api.icedatavault.icedataservices.com/v2/data/2020/11/30/S945/WATCHLIST/"
"20201130-S945_WATCHLIST_accountname_0_0"
),
source_id=945,
reference_date=datetime.datetime(year=2020, month=11, day=30),
size=78994869,
md5sum="bf703f867cad0b414d84fac0c9bfe0e5",
)
assert created_discovered_file_object == expected_discovered_file_object
# Cleanup - none
class TestInitializeSearch:
def test_initialization_of_search_from_instrument_url(
self,
mocked_datavault_api_instrument_level,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list/2020/07/16/S367/WATCHLIST"
credentials = ("username", "password")
# Exercise
stack, leaf_nodes = crawler.initialise_search(url, credentials, session)
# Verify
expected_stack = []
expected_leaf_nodes = [
DiscoveredFileInfo(
file_name="WATCHLIST_367_20200716.txt.bz2",
download_url=(
"https://api.icedatavault.icedataservices.com/v2/data/2020/07/16/S367/"
"WATCHLIST/20200716-S367_WATCHLIST_username_0_0"
),
source_id=367,
reference_date=datetime.datetime(year=2020, month=7, day=16),
size=100145874,
md5sum="fb34325ec9262adc74c945a9e7c9b465",
)
]
assert stack == expected_stack
assert leaf_nodes == expected_leaf_nodes
# Cleanup - none
def test_initialisation_of_search_from_instrument_url_and_not_matching_source_id(
self,
mocked_datavault_api_instrument_level,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list/2020/07/16/S367/WATCHLIST"
credentials = ("username", "password")
source_id = 945
# Exercise
stack, leaf_nodes = crawler.initialise_search(url, credentials, session, source_id)
# Verify
assert stack == []
assert leaf_nodes == []
# Cleanup - none
def test_initialisation_of_search_from_instrument_url_and_matching_source_id(
self,
mocked_datavault_api_instrument_level,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list/2020/07/16/S367/WATCHLIST"
credentials = ("username", "password")
source_id = 367
# Exercise
stack, leaf_nodes = crawler.initialise_search(url, credentials, session, source_id)
# Verify
assert stack == []
assert leaf_nodes == [
DiscoveredFileInfo(
file_name="WATCHLIST_367_20200716.txt.bz2",
download_url=(
"https://api.icedatavault.icedataservices.com/v2/data/2020/07/16/S367/"
"WATCHLIST/20200716-S367_WATCHLIST_username_0_0"
),
source_id=367,
reference_date=datetime.datetime(year=2020, month=7, day=16),
size=100145874,
md5sum="fb34325ec9262adc74c945a9e7c9b465",
)
]
# Cleanup - none
def test_initialisation_of_search_from_top_level(
self,
mocked_top_level_datavault_api,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
stack, leaf_nodes = crawler.initialise_search(url, credentials, session)
# Verify
expected_stack = [
{
'name': '2020',
'parent': '/v2/list',
'url': '/v2/list/2020',
'size': 0,
'createdAt': '2020-01-01T00:00:00',
'updatedAt': '2020-12-01T00:00:00',
'writable': False,
'directory': True
},
]
expected_leaf_nodes = []
assert stack == expected_stack
assert leaf_nodes == expected_leaf_nodes
def test_initialisation_behaviour_with_error_code(
self,
mocked_top_level_datavault_api_failed_request,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
with pytest.raises(requests.exceptions.HTTPError):
crawler.initialise_search(url, credentials, session)
class TestCreateNodeUrl:
def test_creation_of_node_url(self):
# Setup
url_path = "v2/list/2020/11/30/S945"
# Exercise
node_url = crawler.create_node_url(url_path)
# Verify
expected_url = "https://api.icedatavault.icedataservices.com/v2/list/2020/11/30/S945"
assert node_url == expected_url
# Cleanup - none
class TestTraverseApiDirectoryTree:
def test_traversal_of_api_directory_tree(
self,
mocked_datavault_api_single_source_single_day,
mocked_files_available_to_download_single_source_single_day,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
leaf_nodes = []
# Exercise
discovered_files = crawler.traverse_api_directory_tree(
session,
credentials,
session.get(url).json(),
leaf_nodes
)
# Verify
expected_files = mocked_files_available_to_download_single_source_single_day
        # list.sort() returns None, so comparing the two return values would
        # always pass; sort in place first, then compare the lists.
        discovered_files.sort(key=lambda x: x.file_name)
        expected_files.sort(key=lambda x: x.file_name)
        assert discovered_files == expected_files
# Cleanup - none
def test_traversal_of_api_directory_tree_with_not_matching_source_id(
self,
mocked_datavault_api_single_source_single_day,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
leaf_nodes = []
source_id = 673
# Exercise
discovered_files = crawler.traverse_api_directory_tree(
session,
credentials,
session.get(url).json(),
leaf_nodes,
source_id
)
# Verify
assert discovered_files == []
# Cleanup - none
def test_traversal_of_api_directory_tree_with_matching_source_id(
self,
mocked_datavault_api_multiple_sources_single_day,
mocked_files_available_to_download_multiple_sources_single_day,
):
# Setup
session = requests.Session()
url = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
leaf_nodes = []
source_id = 367
# Exercise
discovered_files = crawler.traverse_api_directory_tree(
session,
credentials,
session.get(url).json(),
leaf_nodes,
source_id
)
discovered_files.sort(key=lambda x: x.file_name)
# Verify
expected_files = [
file for file in mocked_files_available_to_download_multiple_sources_single_day
if file.source_id == int(source_id)
]
expected_files.sort(key=lambda x: x.file_name)
assert discovered_files == expected_files
# Cleanup - none
def test_traversal_of_api_directory_tree_with_empty_stack(
self
):
# Setup
session = requests.Session()
credentials = ("username", "password")
stack = []
leaf_nodes = []
# Exercise
discovered_files = crawler.traverse_api_directory_tree(
session,
credentials,
stack,
leaf_nodes
)
# Verify
assert discovered_files == []
# Cleanup - none
def test_traversal_of_api_directory_tree_with_failed_request_down_the_line(
self,
mocked_datavault_api_with_down_the_line_failed_request,
):
# Setup
session = requests.Session()
credentials = ("username", "password")
stack = [
{
'name': '2020',
'parent': '/v2/list',
'url': '/v2/list/2020',
'size': 0,
'createdAt': '2020-01-01T00:00:00',
'updatedAt': '2020-12-02T00:00:00',
'writable': False,
'directory': True,
},
]
leaf_nodes = []
# Exercise
# Verify
with pytest.raises(requests.exceptions.HTTPError):
crawler.traverse_api_directory_tree(session, credentials, stack, leaf_nodes)
# Cleanup - none
def test_traversal_of_api_directory_with_repeated_node_in_stack(
self,
mocked_datavault_api_with_repeated_node
):
# Setup
session = requests.Session()
credentials = ("username", "password")
stack = [
{
'name': '2020',
'parent': '/v2/list',
'url': '/v2/list/2020',
'size': 0,
'createdAt': '2020-01-01T00:00:00',
'updatedAt': '2020-12-02T00:00:00',
'writable': False,
'directory': True,
},
]
leaf_nodes = []
# Exercise
discovered_instruments = crawler.traverse_api_directory_tree(
session, credentials, stack, leaf_nodes
)
# Verify
assert discovered_instruments == [
DiscoveredFileInfo(
file_name='COREREF_945_20201201.txt.bz2',
download_url=(
"https://api.icedatavault.icedataservices.com/v2/data/2020/12/01/S945/CORE/"
"20201201-S945_CORE_ALL_0_0"
),
source_id=945,
reference_date=datetime.datetime(year=2020, month=12, day=1),
size=15680,
md5sum='c9cc20020def775933be0be9690a9b5a',
)
]
# Cleanup - none
class TestDatavaultCrawl:
def test_crawler_with_instrument_level_url(
self,
mocked_datavault_api_instrument_level,
mocked_files_available_to_download_single_instrument,
):
# Setup
url_to_crawl = (
"https://api.icedatavault.icedataservices.com/v2/list/2020/07/16/S367/WATCHLIST"
)
credentials = ("username", "password")
# Exercise
discovered_files = crawler.datavault_crawler(url_to_crawl, credentials)
# Verify
assert discovered_files == mocked_files_available_to_download_single_instrument
# Cleanup - none
def test_crawler_with_single_source_and_single_day_setup(
self,
mocked_datavault_api_single_source_single_day,
mocked_files_available_to_download_single_source_single_day,
):
# Setup
url_to_crawl = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
discovered_files = crawler.datavault_crawler(url_to_crawl, credentials)
# Verify
expected_files = mocked_files_available_to_download_single_source_single_day
expected_files.sort(key=lambda x: x.file_name, reverse=True)
assert discovered_files == expected_files
# Cleanup - none
def test_crawler_with_single_source_and_multiple_days_setup(
self,
mocked_datavault_api_single_source_multiple_days,
mocked_files_available_to_download_single_source_multiple_days,
):
# Setup
url_to_crawl = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
discovered_files = crawler.datavault_crawler(url_to_crawl, credentials)
discovered_files.sort(key=lambda x: x.file_name)
# Verify
expected_files = mocked_files_available_to_download_single_source_multiple_days
expected_files.sort(key=lambda x: x.file_name)
assert discovered_files == expected_files
# Cleanup - none
def test_crawler_under_multiple_sources_and_single_day_scenario(
self,
mocked_datavault_api_multiple_sources_single_day,
mocked_files_available_to_download_multiple_sources_single_day
):
# Setup
url_to_crawl = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
discovered_files = crawler.datavault_crawler(url_to_crawl, credentials)
discovered_files.sort(key=lambda x: x.file_name)
# Verify
expected_files = mocked_files_available_to_download_multiple_sources_single_day
expected_files.sort(key=lambda x: x.file_name)
assert discovered_files == expected_files
# Cleanup - none
def test_crawler_under_select_source_scenario(
self,
mocked_datavault_api_multiple_sources_single_day,
mocked_files_available_to_download_multiple_sources_single_day,
):
# Setup
url_to_crawl = "https://api.icedatavault.icedataservices.com/v2/list"
credentials = ("username", "password")
# Exercise
discovered_files = crawler.datavault_crawler(url_to_crawl, credentials, source_id=207)
discovered_files.sort(key=lambda x: x.file_name)
# Verify
expected_files = [
file for file in mocked_files_available_to_download_multiple_sources_single_day
if file.source_id == 207
]
expected_files.sort(key=lambda x: x.file_name)
assert discovered_files == expected_files
# Cleanup - none

# ===== prototype/couch/couch_concurrent.py | ooici/pyon | BSD-2-Clause =====

#!/usr/bin/env python
__author__ = 'Michael Meisinger'
import gevent
import random
from pyon.datastore.datastore import DatastoreManager
from pyon.public import IonObject
import ooi.timer
"""
from prototype.couch.couch_concurrent import runcc
runcc(dict(num_obj=100000, num_read=2000, num_thread=3))
"""
class CouchConcurrencyEval(object):
def __init__(self):
from pyon.core.bootstrap import container_instance
self.container = container_instance
self.rr = self.container.resource_registry
self.rr_store = DatastoreManager.get_datastore_instance("resources")
self.timer = ooi.timer.Timer()
def prepare_scenario1(self, nums):
num_objects = nums.get("num_obj", 10000)
self.res_objs = [IonObject("InstrumentDevice", name="resource_"+str(i)) for i in xrange(num_objects)]
res = self.rr_store.create_mult(self.res_objs)
self.res_ids = [res_id for _,res_id,_ in res]
self.timer.complete_step('create')
# Make indexes update if any
self.rr_store.read_doc(self.res_ids[0])
self.timer.complete_step('prep_done')
def run_cceval1(self, nums):
num_read = nums.get("num_read", 2000)
# for i in xrange(num_read):
# res_obj = self.rr.read(self.res_ids[0])
# self.timer.complete_step('read_same_n')
# for i in xrange(num_read):
# res_obj = self.rr.read(self.res_ids[0])
# self.timer.complete_step('read_same_n2')
# for i in xrange(num_read):
# res_obj = self.rr.read(self.res_ids[random.randint(0, len(self.res_ids)-1)])
# self.timer.complete_step('read_rand_n')
# for i in xrange(num_read):
# res_obj = self.rr_store.read_doc(self.res_ids[random.randint(0, len(self.res_ids)-1)])
# self.timer.complete_step('readdoc_rand_n')
num_thread = nums.get("num_thread", 5)
def _proc():
for i in xrange(int(num_read/num_thread)):
res_obj = self.rr.read(self.res_ids[random.randint(0, len(self.res_ids)-1)])
gls = [gevent.spawn(_proc) for i in xrange(num_thread)]
gevent.joinall(gls)
self.timer.complete_step('read_conc_same_n')
def _proc():
rr_store = DatastoreManager.get_datastore_instance("resources")
for i in xrange(int(num_read/num_thread)):
res_obj = rr_store.read(self.res_ids[random.randint(0, len(self.res_ids)-1)])
gls = [gevent.spawn(_proc) for i in xrange(num_thread)]
gevent.joinall(gls)
self.timer.complete_step('read_conc2_same_n')
self.timer.complete_step('end')
def print_timers(self):
prior_t = 0
for l,t in self.timer.times:
print l, t-prior_t
prior_t = t
def runcc(nums=None):
nums = nums if nums is not None else {}
cce = CouchConcurrencyEval()
cce.prepare_scenario1(nums)
cce.run_cceval1(nums)
cce.print_timers()

# ===== serve.py | racterub/itac-flask | MIT =====

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2020-04-06 23:45:54
# @Author : Racter Liu (racterub) ([email protected])
# @Link : https://racterub.me
# @License : MIT
from flask import Flask, render_template, request, url_for, redirect, session, send_from_directory, send_file, make_response
app = Flask(__name__)
app.secret_key = "test"
DEBUG = True
PORT = 8989
@app.route("/")
def index():
return "index of /"
# in-url param
@app.route("/db/<dbname>/<int:dbid>")
def db(dbname, dbid):
return "dbname: %s, dbid: %s" % (dbname, dbid+123)
# http get param
@app.route("/get/")
def get():
name = request.args.get("name")
passwd = request.args.get("passwd")
return "name: %s, password: %s" % (name, passwd)
@app.route("/post/")
def post():
name = request.form["name"]
passwd = request.form["passwd"]
return "name: %s, password: %s" % (name, passwd)
# implement login
@app.route("/login/", methods=["POST", "GET"])
def login():
if request.method == "POST":
try:
if (request.form["username"] == "test" and request.form["password"] == "test"):
session["user"] = request.form["username"]
return "Success"
else:
return redirect(url_for("login", next=request.endpoint))
        except KeyError:
            # missing form fields raise werkzeug's BadRequestKeyError, a KeyError subclass
return "Something broke", 400
else:
return render_template("login.html")
# session
@app.route("/admin/")
def admin():
if ('user' not in session):
return redirect(url_for("login", next=request.endpoint))
return "admin!"
@app.route("/logout")
def logout():
if ('user' in session):
session.pop("user", None)
return "Logout"
else:
return redirect(url_for("index"))
# serve static file
@app.route("/robots.txt")
def robot():
return send_from_directory("static", "robots.txt")
# make_response
@app.route("/makeresponse/")
def makeresp():
resp = make_response("test", 200)
resp.headers['X-Author'] = "ITAC"
return resp
# Jinja
@app.route("/jinja/<name>")
def jinja(name):
return render_template("index.html", title=name)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=DEBUG, port=PORT)

# ===== api/list_bp.py | kziovas/practise-todolist | MIT =====

from sanic.response import json
from sanic import Blueprint
from service import ListService
from injector import inject, singleton
from logging import Logger
from sanic import Sanic
@singleton
class ListController:
@inject
    def __init__(self, logger: Logger, list_service: ListService):
        self.logger = logger
        self.list_service = list_service
    async def add_user_list(self, user_id, task_list_id_dict: dict):
        if task_list_id_dict:
            task_list_ids = task_list_id_dict["task_list_ids"]
        else:
            # fall back to an empty list, matching the type extracted above
            task_list_ids = []
        await self.list_service.add_to_list(user_id, task_list_ids)
    async def add_task_list(self, task_list_id, task_list_dict: dict):
        if task_list_dict:
            task_list = task_list_dict["task_list"]
        else:
            # fall back to an empty list, matching the type extracted above
            task_list = []
        await self.list_service.add_to_list(task_list_id, task_list)
async def get_user_list(self, pattern : str = "*") -> list:
user_lists= await self.list_service.get_all_lists_by_pattern(pattern)
return user_lists
async def get_task_list(self, pattern : str = "*") -> list:
task_lists= await self.list_service.get_all_lists_by_pattern(pattern)
return task_lists
async def get_user_task_list(self, user_id : str, user_id_pattern : str, task_list_id : str, task_list_id_pattern : str) -> dict:
user_task_list= await self.list_service.get_user_task_list(user_id, user_id_pattern, task_list_id, task_list_id_pattern)
return user_task_list
    async def add_user_task_list(self, user_id: str, user_id_pattern: str, task_list_id: str, task_list_id_pattern: str, task_list_dict: dict) -> dict:
        if task_list_dict:
            task_list = task_list_dict["task_list"]
        else:
            # fall back to an empty list, matching the type extracted above
            task_list = []
        user_task_list = await self.list_service.add_user_task_list(user_id, user_id_pattern, task_list_id, task_list_id_pattern, task_list)
return user_task_list
def create_list_controller(list_controller: ListController, app: Sanic):
lists_bp = Blueprint("lists")
list_pattern="todo:list:"
user_list_pattern=list_pattern+"user*"
task_list_pattern=list_pattern+"task_list*"
@lists_bp.route("/lists/users", methods =['GET'])
async def get_user_list(request):
lists= await list_controller.get_user_list(user_list_pattern)
return json(lists)
@lists_bp.route("/lists/task-lists", methods =['GET'])
async def get_task_list(request):
lists= await list_controller.get_task_list(task_list_pattern)
return json(lists)
@lists_bp.route("/lists/users/<user_id>", methods =['POST'])
async def post_user_list(request,user_id):
await list_controller.add_user_list(user_id,request.json)
lists= await list_controller.get_user_list(user_list_pattern)
return json(lists)
@lists_bp.route("/lists/tasks/<task_list_id>", methods =['POST'])
async def post_task_list(request,task_list_id):
await list_controller.add_task_list(task_list_id,request.json)
lists= await list_controller.get_task_list(task_list_pattern)
return json(lists)
@lists_bp.route("/lists/users/<user_id>/<task_list_id>", methods =['GET'])
async def get_user_task_list(request,user_id, task_list_id):
user_id_pattern=list_pattern+user_id
task_list_id_pattern=list_pattern+task_list_id
lists= await list_controller.get_user_task_list(user_id, user_id_pattern, task_list_id, task_list_id_pattern)
return json(lists)
@lists_bp.route("/lists/<user_id>/<task_list_id>", methods =['POST'])
async def post_user_task_list(request,user_id,task_list_id):
user_id_pattern=list_pattern+user_id
task_list_id_pattern=list_pattern+task_list_id
lists=await list_controller.add_user_task_list(user_id, user_id_pattern, task_list_id, task_list_id_pattern, request.json)
return json(lists)
app.blueprint(lists_bp)
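
# Wiring sketch (illustrative, not part of the original file; assumes an
# injector container configured elsewhere can build ListController with its
# Logger and ListService dependencies):
#   app = Sanic("todo")
#   controller = injector_container.get(ListController)
#   create_list_controller(controller, app)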

# ===== src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py | derailed-dash/Advent-of-Code | MIT =====

"""
Author: Darren
Date: 02/05/2021
Solving https://adventofcode.com/2015/day/24
We require three bags of equal weight.
Bag 1 in the passenger compartment, needs to have fewest packages.
Bags 2 and 3 to either side.
Solution:
Use subset sum function to work out which combinations of packages adds up to
total weight / number of bags (compartments).
The faster subsum is about 3x quicker than the version that uses itertools.combinations.
Once we have all combinations for the first bag, sort by the number of packages,
since we want the first bag to have fewest possible packages.
We don't care about what's in bags 2, 3...
I.e. because we know we will have valid combinations of packages that will add up to the same weight
"""
from __future__ import absolute_import
import logging
import os
import time
from math import prod
from itertools import combinations
# pylint: disable=logging-fstring-interpolation
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
def main():
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:\t%(message)s")
# input_file = os.path.join(SCRIPT_DIR, SAMPLE_INPUT_FILE)
input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)
with open(input_file, mode="rt") as f:
package_weights = [int(x) for x in f.read().splitlines()]
logging.info(f"Package weights: {package_weights}")
# Part 1
optimum_solution = distribute_packages(package_weights, 3)
logging.info(f"Solution found with QE {get_quantum_entanglement(optimum_solution)}")
logging.info(f"First bag: {optimum_solution}")
# Part 2
optimum_solution = distribute_packages(package_weights, 4)
logging.info(f"Solution found with QE {get_quantum_entanglement(optimum_solution)}")
logging.info(f"First bag: {optimum_solution}")
def distribute_packages(package_weights, number_of_bags) -> tuple:
logging.info(f"Solving for {number_of_bags} bags")
package_count = len(package_weights)
total_weight = sum(package_weights)
target_weight_per_bag = total_weight // number_of_bags
logging.debug(f"Total packages: {package_count}, with total weight: {total_weight}")
logging.debug(f"Target weight per bag: {target_weight_per_bag}")
# Get all combos for first bag.
# Sort by bags in the combo, since the first bag should have fewest packages.
first_bag_combos = faster_subset_sum(package_weights, target_weight_per_bag)
first_bag_combos = sorted(first_bag_combos, key=len)
# store first bag of optimum solution
optimum_solution = tuple()
for first_bag_combo in first_bag_combos:
# First bag must have smallest number of packages
# Skip any bag combos that have more packages than a previous solution
if len(optimum_solution) > 0:
if len(first_bag_combo) > len(optimum_solution):
continue
# if quantum entanglement of the first bag is higher than an existing solution,
# then skip it
if get_quantum_entanglement(first_bag_combo) >= get_quantum_entanglement(optimum_solution):
continue
optimum_solution = first_bag_combo
return optimum_solution
def get_quantum_entanglement(bag: tuple):
return prod(bag)
def faster_subset_sum(items: list, target: int, partial=None, results=None) -> list:
    """
    Determine all combinations of list items that add up to the target
    Args:
        items (list): A list of values
        target (int): The total that the values need to add up to
        partial (list, optional): Used internally by the recursion. Defaults to None.
        results (list, optional): Used internally by the recursion. Defaults to None.
    Returns:
        list: The list of valid combinations
    """
    # Create fresh lists per top-level call: mutable default arguments are
    # shared between calls, which would leak combinations from the 3-bag run
    # into the 4-bag run.
    if partial is None:
        partial = []
    if results is None:
        results = []
total = sum(partial)
# check if the partial sum is equals to target, and if so
# add the current terms to the results list
if total == target:
results.append(partial)
# if the partial sum equals or exceed the target, no point in recursing through remaining terms.
if total >= target:
return []
for i, item in enumerate(items):
remaining_numbers = items[i + 1:]
faster_subset_sum(remaining_numbers, target, partial + [item], results)
return results
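
# Illustrative call: faster_subset_sum([1, 2, 3, 4], 5) returns
# [[1, 4], [2, 3]] -- every combination of the items that sums to the target.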
def simple_subset_sum(items, target: int) -> tuple:
""" Return a tuple of any combinations of items that adds up to the target
Args:
items (Sequence): List/set of items
target (int): The target sum to achieve
Yields:
Iterator[tuple]: Items that achieve the desired sum
"""
# Iterating through all possible subsets of collection from lengths 0 to n:
for i in range(len(items)+1):
for subset in combinations(items, i):
# printing the subset if its sum is x:
if sum(subset) == target:
yield subset
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")

# ===== artap/algorithm_bayesopt.py | tamasorosz/artap | MIT =====

from .problem import Problem
from .algorithm import Algorithm
from .config import artap_root
import time
import numpy as np
import os
import sys
sys.path.append(artap_root + os.sep + "lib" + os.sep)
import bayesopt
from multiprocessing import Process, Pipe, Queue, Manager
# from multiprocessing.managers import BaseManager
_l_type = ['L_FIXED', 'L_EMPIRICAL', 'L_DISCRETE', 'L_MCMC', 'L_ERROR']
_sc_type = ['SC_MTL', 'SC_ML', 'SC_MAP', 'SC_LOOCV', 'SC_ERROR']
_surr_name = ["sGaussianProcess", "sGaussianProcessML", "sGaussianProcessNormal", "sStudentTProcessJef", "sStudentTProcessNIG"]
# Python module to run the BayesOpt library in an OO pattern.
# The objective module should inherit this one and override evaluateSample.
class BayesOptContinuous(object):
# Let's define the vector.
#
# For different options: see vector.h and vector.cpp .
# If a parameter is not defined, it will be automatically set
# to a default value.
def __init__(self, n_dim):
## Library vector
self.params = {}
## n dimensions
self.n_dim = n_dim
## Lower bounds
self.lb = np.zeros((self.n_dim,))
## Upper bounds
self.ub = np.ones((self.n_dim,))
@property
def parameters(self):
return self.params
@parameters.setter
def parameters(self, params):
self.params = params
@property
def lower_bound(self):
return self.lb
@lower_bound.setter
def lower_bound(self, lb):
self.lb = lb
@property
def upper_bound(self):
return self.ub
@upper_bound.setter
def upper_bound(self, ub):
self.ub = ub
## Function for testing.
# It should be overriden.
def evaluateSample(self, x_in):
raise NotImplementedError("Please Implement this method")
## Main function. Starts the optimization process.
def optimize(self):
min_val, x_out, error = bayesopt.optimize(self.evaluateSample, self.n_dim,
self.lb, self.ub,
self.params)
return min_val, x_out, error
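
# Minimal subclassing sketch (illustrative, not part of the original module):
#
#   class Quadratic(BayesOptContinuous):
#       def evaluateSample(self, x_in):
#           return float(np.sum(np.asarray(x_in) ** 2))  # minimise a quadratic
#
#   bo = Quadratic(n_dim=2)
#   min_val, x_out, error = bo.optimize()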
class BayesOpt(Algorithm):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.problem = problem
self.options.declare(name='l_type', default='L_EMPIRICAL', values=_l_type,
desc='Type of learning for the kernel params')
self.options.declare(name='sc_type', default='SC_MAP', values=_sc_type,
desc='Type of learning for the kernel params')
self.options.declare(name='n_iterations', default=50, lower=1,
desc='Maximum BayesOpt evaluations')
self.options.declare(name='init_method', default=1,
desc='Init method') # 1-LHS, 2-Sobol
self.options.declare(name='n_init_samples', default=10, lower=1,
desc='Number of samples before optimization')
self.options.declare(name='n_iter_relearn', default=10, lower=1,
desc='Number of samples before relearn kernel')
self.options.declare(name='surr_name', default='sGaussianProcessML', values=_surr_name,
desc='Name of the surrogate function')
self.options.declare(name='surr_noise', default=1e-10, lower=0.0,
desc='Variance of observation noise')
class BayesOptClassSerial(BayesOptContinuous):
def __init__(self, algorithm):
n = len(algorithm.problem.parameters)
super().__init__(n)
# algorithm
self.algorithm = algorithm
# Size design variables.
self.lb = np.empty((n,))
self.ub = np.empty((n,))
self.params = {}
def evaluateSample(self, x):
return self.algorithm.evaluator.evaluate_scalar(x)
class BayesOptSerial(BayesOpt):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.bo = BayesOptClassSerial(self)
def run(self):
# Figure out bounds vectors.
i = 0
for parameter in self.problem.parameters:
bounds = parameter['bounds']
self.bo.lb[i] = bounds[0]
self.bo.ub[i] = bounds[1]
i += 1
# set bayesopt
self.bo.params['n_iterations'] = self.options['n_iterations']
self.bo.params['n_init_samples'] = self.options['n_init_samples']
self.bo.params['n_iter_relearn'] = self.options['n_iter_relearn']
self.bo.params['surr_name'] = self.options['surr_name']
self.bo.params['surr_noise'] = self.options['surr_noise']
self.bo.params['init_method'] = self.options['init_method']
self.bo.params['l_type'] = self.options['l_type']
self.bo.params['sc_type'] = self.options['sc_type']
self.bo.params['verbose_level'] = self.options['verbose_level']
t_s = time.time()
self.problem.logger.info("BayesOpt: surr_name{}".format(self.options['surr_name']))
mvalue, x_out, error = self.bo.optimize()
t = time.time() - t_s
self.problem.logger.info("BayesOpt: elapsed time: {} s".format(t))
# sync changed individual informations
self.problem.data_store.sync_all()
if error != 0:
print('Optimization FAILED.')
print("Error", error)
print('-' * 35)
else:
pass
# print('Optimization Complete, %f seconds' % (clock() - start))
# print("Result", x_out, mvalue)
# print('-' * 35)
class BayesOptClassParallel(Process, BayesOptContinuous):
def __init__(self, pipe, algorithm):
n = len(algorithm.problem.parameters)
Process.__init__(self)
BayesOptContinuous.__init__(self, n)
# algorithm
self.algorithm = algorithm
# output
self.mvalue = -1.0
self.x_out = -1.0
self.error = 0
self.pipe = pipe
# Size design variables.
self.lb = np.empty((n,))
self.ub = np.empty((n,))
self.params = {}
def run(self):
mvalue, x_out, error = self.optimize()
self.pipe.send('STOP')
# set output values
self.mvalue = mvalue
self.x_out = x_out
self.error = error
# output
print("output")
print(self.mvalue)
print(self.x_out)
print(self.error)
def evaluateSample(self, x):
self.pipe.send(x)
result = self.pipe.recv()
return result
class BayesOptParallel(BayesOpt):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.pipe_par, self.pipe_child = Pipe()
self.bo = BayesOptClassParallel(self.pipe_child, self)
def worker(self, pipe):
x = None
while True:
x = pipe.recv()
if str(x) == 'STOP':
break
            # evaluate through the algorithm's evaluator, mirroring the serial path
            # (the original referenced self.bo.job, which the class never defines)
            result = self.evaluator.evaluate_scalar(x)
pipe.send(result)
def run(self):
# Figure out bounds vectors.
i = 0
for parameter in self.problem.parameters:
bounds = parameter['bounds']
self.bo.lb[i] = bounds[0]
self.bo.ub[i] = bounds[1]
i += 1
# set bayesopt
self.bo.params['n_iterations'] = self.options['n_iterations']
self.bo.params['n_init_samples'] = self.options['n_init_samples']
self.bo.params['n_iter_relearn'] = self.options['n_iter_relearn']
self.bo.params['surr_name'] = self.options['surr_name']
self.bo.params['surr_noise'] = self.options['surr_noise']
self.bo.params['init_method'] = self.options['init_method']
self.bo.params['l_type'] = self.options['l_type']
self.bo.params['sc_type'] = self.options['sc_type']
self.bo.params['verbose_level'] = self.options['verbose_level']
# process = Process(target=self.worker, args=(self.pipe_par, self.problem, ))
process = Process(target=self.worker, args=(self.pipe_par, ))
self.bo.start()
process.start()
self.bo.join()
process.join()
print(self.bo.mvalue)
print(self.bo.x_out)
print(self.bo.error)
print()
print(self.problem.data_store, len(self.problem.populations[-1].individuals))
# self.result = self.mvalue
"""
if self.bo.error != 0:
print('Optimization FAILED.')
print("Error", self.bo.error)
print('-' * 35)
else:
print('Optimization Complete, %f seconds' % (clock() - start))
print("Result", self.bo.x_out, self.bo.mvalue)
print('-' * 35)
"""

# ===== Sorting/ShortBubbleSort.py | sonivaibhv/Algo | MIT =====

def Short_Bubble_Sort(alist):
'''
Sorting alist using Short Bubble Sort
'''
passnum = len(alist) - 1
exchangesDone = True
while passnum > 0 and exchangesDone:
exchangesDone = False
for i in range(passnum):
if alist[i] > alist[i+1]:
exchangesDone = True
alist[i], alist[i+1] = alist[i+1], alist[i]
passnum = passnum - 1
return alist
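
# Note: on an already-sorted list the first pass makes no exchanges, so the
# while loop exits after a single pass -- the "short" in short bubble sort.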
def main():
alist=[20, 30, 40, 90, 50, 60, 70, 80, 100, 110]
print(Short_Bubble_Sort(alist))
main()

# ===== src/arclink/libs/python/seiscomp/db/generic/genwrap.py | yannikbehr/seiscomp3 | Naumen/Condor-1.1/MS-PL =====

# This file was created by a source code generator:
# genxml2wrap.py
# Do not modify. Change the definition and
# run the generator again!
#
# (c) 2010 Mathias Hoffmann, GFZ Potsdam
#
#
import datetime
class _TrackedObject(object):
def __init__(self):
self.__dict__['last_modified'] = datetime.datetime(1970, 1, 1, 0, 0, 0)
def __setattr__(self, name, value):
if name not in self.__dict__ or self.__dict__[name] != value:
self.__dict__[name] = value
self.__dict__['last_modified'] = datetime.datetime.utcnow()
#
#
# QualityControl::QCLog
class base_QCLog(_TrackedObject):
publicID = ""
networkCode = ""
stationCode = ""
streamCode = ""
locationCode = ""
creatorID = ""
created = None
start = None
end = None
message = ""
# QualityControl::WaveformQuality
class base_WaveformQuality(_TrackedObject):
networkCode = ""
stationCode = ""
streamCode = ""
locationCode = ""
creatorID = ""
created = None
start = None
end = None
type = ""
parameter = ""
value = None
lowerUncertainty = None
upperUncertainty = None
windowLength = None
# QualityControl::Outage
class base_Outage(_TrackedObject):
networkCode = ""
stationCode = ""
streamCode = ""
locationCode = ""
creatorID = ""
created = None
start = None
end = None
# Inventory::StationReference
class base_StationReference(_TrackedObject):
stationID = ""
# Inventory::StationGroup
class base_StationGroup(_TrackedObject):
publicID = ""
type = None
code = ""
start = None
end = None
description = ""
latitude = None
longitude = None
elevation = None
# StationReference = ""
# Inventory::AuxSource
class base_AuxSource(_TrackedObject):
name = ""
description = ""
unit = ""
conversion = ""
sampleRateNumerator = None
sampleRateDenominator = None
remark = ""
# Inventory::AuxDevice
class base_AuxDevice(_TrackedObject):
publicID = ""
name = ""
description = ""
model = ""
manufacturer = ""
remark = ""
# AuxSource = ""
# Inventory::SensorCalibration
class base_SensorCalibration(_TrackedObject):
serialNumber = ""
channel = None
start = None
end = None
gain = None
gainFrequency = None
remark = ""
# Inventory::Sensor
class base_Sensor(_TrackedObject):
publicID = ""
name = ""
description = ""
model = ""
manufacturer = ""
type = ""
unit = ""
lowFrequency = None
highFrequency = None
response = ""
remark = ""
# SensorCalibration = ""
# Inventory::ResponsePAZ
class base_ResponsePAZ(_TrackedObject):
publicID = ""
name = ""
type = ""
gain = None
gainFrequency = None
normalizationFactor = None
normalizationFrequency = None
numberOfZeros = None
numberOfPoles = None
zeros = ""
poles = ""
remark = ""
# Inventory::ResponsePolynomial
class base_ResponsePolynomial(_TrackedObject):
publicID = ""
name = ""
gain = None
gainFrequency = None
frequencyUnit = ""
approximationType = ""
approximationLowerBound = None
approximationUpperBound = None
approximationError = None
numberOfCoefficients = None
coefficients = ""
remark = ""
# Inventory::DataloggerCalibration
class base_DataloggerCalibration(_TrackedObject):
serialNumber = ""
channel = None
start = None
end = None
gain = None
gainFrequency = None
remark = ""
# Inventory::Decimation
class base_Decimation(_TrackedObject):
sampleRateNumerator = None
sampleRateDenominator = None
analogueFilterChain = ""
digitalFilterChain = ""
# Inventory::Datalogger
class base_Datalogger(_TrackedObject):
publicID = ""
name = ""
description = ""
digitizerModel = ""
digitizerManufacturer = ""
recorderModel = ""
recorderManufacturer = ""
clockModel = ""
clockManufacturer = ""
clockType = ""
gain = None
maxClockDrift = None
remark = ""
# DataloggerCalibration = ""
# Decimation = ""
# Inventory::ResponseFIR
class base_ResponseFIR(_TrackedObject):
publicID = ""
name = ""
gain = None
decimationFactor = None
delay = None
correction = None
numberOfCoefficients = None
symmetry = ""
coefficients = ""
remark = ""
# Inventory::AuxStream
class base_AuxStream(_TrackedObject):
code = ""
start = None
end = None
device = ""
deviceSerialNumber = ""
source = ""
format = ""
flags = ""
restricted = None
# Inventory::Stream
class base_Stream(_TrackedObject):
code = ""
start = None
end = None
datalogger = ""
dataloggerSerialNumber = ""
dataloggerChannel = None
sensor = ""
sensorSerialNumber = ""
sensorChannel = None
clockSerialNumber = ""
sampleRateNumerator = None
sampleRateDenominator = None
depth = None
azimuth = None
dip = None
gain = None
gainFrequency = None
gainUnit = ""
format = ""
flags = ""
restricted = None
shared = None
# Inventory::SensorLocation
class base_SensorLocation(_TrackedObject):
publicID = ""
code = ""
start = None
end = None
latitude = None
longitude = None
elevation = None
# AuxStream = ""
# Stream = ""
# Inventory::Station
class base_Station(_TrackedObject):
publicID = ""
code = ""
start = None
end = None
description = ""
latitude = None
longitude = None
elevation = None
place = ""
country = ""
affiliation = ""
type = ""
archive = ""
archiveNetworkCode = ""
restricted = None
shared = None
remark = ""
# SensorLocation = ""
# Inventory::Network
class base_Network(_TrackedObject):
publicID = ""
code = ""
start = None
end = None
description = ""
institutions = ""
region = ""
type = ""
netClass = ""
archive = ""
restricted = None
shared = None
remark = ""
# Station = ""
# Routing::RouteArclink
class base_RouteArclink(_TrackedObject):
address = ""
start = None
end = None
priority = None
# Routing::RouteSeedlink
class base_RouteSeedlink(_TrackedObject):
address = ""
priority = None
# Routing::Route
class base_Route(_TrackedObject):
publicID = ""
networkCode = ""
stationCode = ""
locationCode = ""
streamCode = ""
# RouteArclink = ""
# RouteSeedlink = ""
# Routing::Access
class base_Access(_TrackedObject):
networkCode = ""
stationCode = ""
locationCode = ""
streamCode = ""
user = ""
start = None
end = None

# ===== mailchimp_marketing_asyncio/models/problem_detail_document.py | john-parton/mailchimp-asyncio | Apache-2.0 =====

# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProblemDetailDocument(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'title': 'str',
'status': 'int',
'detail': 'str',
'instance': 'str'
}
attribute_map = {
'type': 'type',
'title': 'title',
'status': 'status',
'detail': 'detail',
'instance': 'instance'
}
def __init__(self, type=None, title=None, status=None, detail=None, instance=None): # noqa: E501
"""ProblemDetailDocument - a model defined in Swagger""" # noqa: E501
self._type = None
self._title = None
self._status = None
self._detail = None
self._instance = None
self.discriminator = None
self.type = type
self.title = title
self.status = status
self.detail = detail
self.instance = instance
@property
def type(self):
"""Gets the type of this ProblemDetailDocument. # noqa: E501
An absolute URI that identifies the problem type. When dereferenced, it should provide human-readable documentation for the problem type. # noqa: E501
:return: The type of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ProblemDetailDocument.
An absolute URI that identifies the problem type. When dereferenced, it should provide human-readable documentation for the problem type. # noqa: E501
:param type: The type of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def title(self):
"""Gets the title of this ProblemDetailDocument. # noqa: E501
A short, human-readable summary of the problem type. It shouldn't change based on the occurrence of the problem, except for purposes of localization. # noqa: E501
:return: The title of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ProblemDetailDocument.
A short, human-readable summary of the problem type. It shouldn't change based on the occurrence of the problem, except for purposes of localization. # noqa: E501
:param title: The title of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def status(self):
"""Gets the status of this ProblemDetailDocument. # noqa: E501
The HTTP status code (RFC2616, Section 6) generated by the origin server for this occurrence of the problem. # noqa: E501
:return: The status of this ProblemDetailDocument. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ProblemDetailDocument.
The HTTP status code (RFC2616, Section 6) generated by the origin server for this occurrence of the problem. # noqa: E501
:param status: The status of this ProblemDetailDocument. # noqa: E501
:type: int
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def detail(self):
"""Gets the detail of this ProblemDetailDocument. # noqa: E501
A human-readable explanation specific to this occurrence of the problem. [Learn more about errors](/developer/guides/get-started-with-mailchimp-api-3/#Errors). # noqa: E501
:return: The detail of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._detail
@detail.setter
def detail(self, detail):
"""Sets the detail of this ProblemDetailDocument.
A human-readable explanation specific to this occurrence of the problem. [Learn more about errors](/developer/guides/get-started-with-mailchimp-api-3/#Errors). # noqa: E501
:param detail: The detail of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if detail is None:
raise ValueError("Invalid value for `detail`, must not be `None`") # noqa: E501
self._detail = detail
@property
def instance(self):
"""Gets the instance of this ProblemDetailDocument. # noqa: E501
A string that identifies this specific occurrence of the problem. Please provide this ID when contacting support. # noqa: E501
:return: The instance of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this ProblemDetailDocument.
A string that identifies this specific occurrence of the problem. Please provide this ID when contacting support. # noqa: E501
:param instance: The instance of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if instance is None:
raise ValueError("Invalid value for `instance`, must not be `None`") # noqa: E501
self._instance = instance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProblemDetailDocument, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProblemDetailDocument):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other

# ===== loadingstatus.py | NesbiDevelopment/PythonHelper | MIT =====

import time
import sys
class Loadingstatus:
def __init__(self, barlength = 20):
self.loading = 0
self.lastCall = self.getMilliSeconds()
self.barlength = barlength
def getMilliSeconds(self):
return int(time.time() * 1000)
def updatePending(self):
newCall = self.getMilliSeconds()
if newCall-self.lastCall > 500:
#switch loading
loadingSymbol = {0: "/", 1: "-", 2: "\\", 3: "|"}[self.loading]
self.loading = (self.loading + 1)%4
sys.stdout.write(loadingSymbol+"\r")
sys.stdout.flush()
self.lastCall = newCall
def updateLoading(self,percent):
sys.stdout.write(self.getBar(percent)+"\r")
sys.stdout.flush()
def endLoading(self,percent = 1):
print(self.getBar(percent))
def getBar(self,percent):
progressLength = int((self.barlength-2)*percent)
progress = "=" * progressLength
bar = "-" * (self.barlength-progressLength-2)
percentString = "%.2f" % (percent*100)
return "["+progress+bar+"] "+percentString+"%"

# ===== main.py | WillyHHsu/rest | MIT =====

import os
from fastapi import FastAPI
from fastapi_sqlalchemy import DBSessionMiddleware
from fastapi_sqlalchemy import db
from dotenv import load_dotenv
from db import models as db_model
from schemas import models as schema
load_dotenv()
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_DB = os.getenv('POSTGRES_DB')
POSTGRES_URL = os.getenv('POSTGRES_URL')
POSTGRES_PORT = os.getenv('POSTGRES_PORT', 5432)
app = FastAPI(
title="API REST",
description="Uma API REST by WillyHHsu",
)
app.add_middleware(
DBSessionMiddleware,
db_url=f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_URL}:{POSTGRES_PORT}/{POSTGRES_DB}"
)
@app.get("/users")
def get_users():
users = db.session.query(db_model.Player).all()
return users
@app.post("/tournament",
summary='Cadastra um novo torneio',
response_model=schema.Tournament)
def new_tournament(tornament_request: schema.Tournament):
db.session.add(db_model.Tornament(tornament_request))
db.session.commit()
return schema.Tournament(**tornament_request)
@app.post("/tournament/{id_tournament}/competitor",
summary='Cadastra um novo competidor')
def new_tournament(id_tournament):
return db.session.query(db_model.Tournament).filter(id_tournament=id_tournament).first()
@app.get("/tournament/{id_tournament}/match",
summary='Lista as partidas de um torneio')
def list_match(id_tournament):
return db.session.query(db_model.Game).filter(id_tournament=id_tournament).all()
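
# Run sketch (illustrative): `uvicorn main:app --reload`, assuming a .env file
# provides the POSTGRES_* variables loaded above.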

# ===== src/compas_blender/utilities/data.py | KEERTHANAUDAY/compas | MIT =====

import bpy
__all__ = [
"delete_all_data",
]
def delete_all_data():
"""Delete all collections, mesh and curve objects, meshes, curves, materials."""
for collection in bpy.data.collections:
bpy.data.collections.remove(collection)
for obj in bpy.data.objects:
if obj.type == 'MESH':
bpy.data.objects.remove(obj)
elif obj.type == 'CURVE':
bpy.data.objects.remove(obj)
for mesh in bpy.data.meshes:
bpy.data.meshes.remove(mesh)
for curve in bpy.data.curves:
bpy.data.curves.remove(curve)
for material in bpy.data.materials:
bpy.data.materials.remove(material)
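
# Usage sketch (illustrative; must run inside Blender, where bpy is available):
#   delete_all_data()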
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass

# ===== teams_to_tsv.py | FSU-ACM-OSSG/Contest-Server | MIT =====

##############
# team_to_tsv script
# Creates two tsv files for importing into domjudge
# Team info gets stored inside teams.tsv in the following format
# <team_id(int)> <external_id> <category_id> <team_name>
# Account info gets stored inside acccounts.tsv in the following format
# team <team-name> <user-name> <password> <teamid>
#
# Import teams.tsv first, then accounts.tsv
#
# NOTE 1 : Domjudge doesn't insert teams with ID < 1
from app.models.Team import *
with open("teams.tsv", "w+") as teams_tsv, \
open("accounts.tsv", "w+") as accounts_tsv:
# Headers requiered by domjudge
teams_tsv.write("teams\t1\n")
accounts_tsv.write("accounts\t1\n")
walkin_counter = 1
for team in Team.objects.all():
        # Only make 100 walk-in accounts (walkin_counter starts at 1)
        if walkin_counter > 100:
            break
# Accounts that are not in use are assigned to walk-ins
if team.team_name is None:
team.team_name = "".join(("Walk-in-", str(walkin_counter)))
walkin_counter += 1
# Empty team names are assign a dummy value
if team.team_name.isspace():
team.team_name = "UnnamedTeam"
# Avoiding team number 0, refer to NOTE 1 in the header
if team.teamID == "acm-0":
continue
teams_tsv.write(u"\t".join(
[team.teamID.strip("acm-"), # To only get ID number
team.teamID, # Set to external ID for exporting
"2", # Category ID of Participants Category - See footnote
team.team_name.strip('\t'), # So tabs in team_name don't interfere
'\n']))
accounts_tsv.write(u"\t".join(
["team",
team.team_name.strip('\t'), # So tabs in team_name don't interfere
'{0}-{1}'.format('team', team.teamID.split('-')[1].zfill(3)),
team.domPass,
# team.teamID.strip("acm-"), # To only get ID number
'\n']))
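
# Example rows produced by the writes above (illustrative, tab-separated):
#   teams.tsv:    42  acm-42  2  Some Team
#   accounts.tsv: team  Some Team  team-042  s3cret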
#
# FOOTNOTE: Team Category
#
# This value determines the team_category. Domjudge's defaults are:
# 1 -> System
# 2 -> Self-Registered
# 3 -> Jury
#
# Since System and Jury are meant for admin, we assign teams to being
# "self-registered" because you can't self-register for our contests
# anyway, and this is easier than making you create a new category first.
#

# ===== mishris/utils/util.py | virazura/mishris | MIT =====

from __future__ import unicode_literals
import frappe

# NOTE: the commented-out language handling below still needs `local` from
# frappe (e.g. `from frappe import local`) before it can be re-enabled.
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, encrypt=False, password=None, print_letterhead=True):
from frappe.utils import scrub_urls
from PyPDF2 import PdfFileWriter
from PyPDF2 import PdfFileReader
from frappe.utils.print_format import read_multi_pdf
output = PdfFileWriter()
if not file_name: file_name = name
file_name = file_name.replace(' ','').replace('/','-')
print_settings = frappe.db.get_singles_dict("Print Settings")
hr_settings = frappe.db.get_singles_dict("HR Settings")
# Not Check Lang on this print format
#_lang = local.lang
#set lang as specified in print format attachment
#if lang: local.lang = lang
#local.flags.ignore_print_permissions = True
no_letterhead = not print_letterhead
if int(print_settings.send_print_as_pdf or 0):
output = frappe.get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead, output=output)
if int(hr_settings.encrypt_salary_slip):
output.encrypt(password)
salary_slip = read_multi_pdf(output)
        # needs to be converted to bytes
out = {
"fname": file_name + ".pdf",
"fcontent": salary_slip
}
else:
out = {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead)).encode("utf-8")
}
#local.flags.ignore_print_permissions = False
#reset lang to original local lang
#local.lang = _lang
return out

# ===== cleverapi/clever_api.py | oncecreated/cleverapi | MIT =====

import hashlib
import json
import uuid
import requests
import aiohttp
from .exceptions import ApiResponseError
from .action import Action
class BaseCleverApi():
def __init__(self, access_token, version="5.73"):
self.access_token = access_token
self.api_version = version
self.device_id = uuid.uuid4().hex[:16]
self.api_host = "api.vk.com"
def fetch(self, method, data=None):
if data is None:
data = {}
return method, data
def get_longpoll(self, owner_id, video_id):
data = {"owner_id": owner_id, "video_id": video_id}
return self.fetch("video.getLongPollServer", data)
def get_start_data(self):
data = {
"build_ver": "503028",
"need_leaderboard": "0",
"func_v": "6",
"lang": "ru",
"https": "1"
}
return self.fetch("execute.getStartData", data)
def get_user(self):
return self.fetch("users.get")
def get_hash(self, additional: list, user_id):
ids = "".join(map(str, additional)) + "3aUFMZGRCJ"
ids_hash = hashlib.md5(ids.encode()).hexdigest()
user = str(int(user_id) ^ 202520)
user_hash = hashlib.md5(user.encode()).hexdigest()
device = str(self.device_id) + "0MgLscD6R3"
device_hash = hashlib.md5(device.encode()).hexdigest()
return "{}#{}#{}".format(ids_hash, user_hash, device_hash)
def bump(self, lat, lon):
data = {"lat": lat, "lon": lon, "prod": 1, "func_v": 1}
return self.fetch("execute.bump", data)
def send_action(self, *, action_id: Action, user_id):
secure_hash = self.get_hash([action_id.value], user_id)
data = {"action_id": action_id.value, "hash": secure_hash}
return self.fetch("streamQuiz.trackAction", data)
def send_answer(self, *, coins_answer: bool, game_id, answer_id, question_id, user_id):
secure_hash = self.get_hash([game_id, question_id], user_id)
data = {
"answer_id": answer_id,
"question_id": question_id,
"device_id": self.device_id,
"hash": secure_hash,
}
if coins_answer:
data["coins_answer"] = True
return self.fetch("streamQuiz.sendAnswer", data)
def get_gifts(self):
return self.fetch("execute.getGifts")
def purchase_gift(self, gift_id):
data = {"gift_id": gift_id}
return self.fetch("streamQuiz.purchaseGift", data)
def get_daily_rewards(self):
return self.fetch("streamQuiz.getDailyRewardsData")
def get_train_questions(self):
return self.fetch("streamQuiz.getTrainQuestions")
def use_extra_life(self):
return self.fetch("streamQuiz.useExtraLife")
def get_nearby_users(self, lat, lon):
data = {"lat": lat, "lon": lon}
return self.fetch("execute.getNearbyUsers", data)
def comment(self, *, owner_id, video_id, message):
data = {
"owner_id": owner_id,
"video_id": video_id,
"message": message
}
return self.fetch("execute.createComment", data)
class CleverApi(BaseCleverApi):
def __init__(self, access_token, version="5.73"):
super().__init__(access_token, version=version)
self.session = requests.Session()
self.session.headers.update({
"User-Agent": "Клевер/2.3.3 (Redmi Note 5; "
"Android 28; VK SDK 1.6.8; com.vk.quiz)".encode(
"utf-8")
})
def fetch(self, method, data=None):
if data is None:
data = {}
data.update({
"access_token": self.access_token,
"v": self.api_version,
"lang": "ru",
"https": 1
})
url = f"https://{self.api_host}/method/{method}"
content = self.session.post(url, data=data).json()
error = content.get("error")
if error is not None:
raise ApiResponseError(json.dumps(content))
return content["response"]
class AsyncCleverApi(BaseCleverApi):
def __init__(self, access_token, connector, version="5.73"):
super().__init__(access_token, version=version)
self.connector = connector
async def fetch(self, method, data=None):
if data is None:
data = {}
data.update({
"access_token": self.access_token,
"v": self.api_version,
"lang": "ru",
"https": 1
})
url = f"https://{self.api_host}/method/{method}"
async with self.connector.session.post(url, data=data) as response:
content = await response.json()
error = content.get("error")
if error is not None:
raise ApiResponseError(json.dumps(content))
return content["response"]
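# Minimal usage sketch (the token and video identifiers below are
# placeholders, not real values):
#
#   api = CleverApi("ACCESS_TOKEN")
#   user = api.get_user()  # performs the HTTP call via the overridden fetch()
#   lp = api.get_longpoll(owner_id=-1234, video_id=456239017)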
| 28.858824 | 91 | 0.584183 | 4,763 | 0.969666 | 0 | 0 | 0 | 0 | 617 | 0.125611 | 878 | 0.178746 |
055ac96948dda92e22c15b66cc5f914681a2cae3
| 5,350 |
py
|
Python
|
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | null | null | null |
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | 7 |
2017-01-03T15:34:30.000Z
|
2017-07-13T15:27:08.000Z
|
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dt
from flask import render_template, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user, login_url
from . import app, db, login_manager
from .models import Post, Tag, Author, tags as Post_Tag
from .forms import LoginForm, PostForm
# Auth#################
@login_manager.user_loader
def load_user(userid):
return Author.query.get(int(userid))
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = Author.get_by_username(form.username.data)
if user is not None and user.check_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('index'))
return render_template('login.html', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
# MAIN##############
@app.route('/')
@app.route('/page/<int:page_num>')
def index(page_num=1):
query = Post.query.filter(Post.published == True)
pagination = query.order_by(Post.date.desc()).paginate(page=page_num, per_page=app.config['POST_PER_PAGE'],
error_out=True)
return render_template('blog.html', pagination=pagination, authors=Author.query.all())
@app.route('/post/<slug>', methods=['GET', 'POST'])
def post(slug):
post = Post.query.filter_by(_display_title=slug).filter(Post.published == True).first_or_404()
return render_template('post.html', post=post)
@app.route('/tag/<name>')
@app.route('/tag/<name>/<int:page_num>')
def tag(name, page_num=1):
tag = Tag.query.filter_by(name=name).first_or_404()
query = Post.query.join(Post_Tag).join(Tag).filter(Tag.id == tag.id).filter(Post.published == True)
    pagination = query.order_by(Post.date.desc()).paginate(
        page=page_num, per_page=app.config['POST_PER_PAGE'], error_out=True)
return render_template('tag.html', pagination=pagination, tag=tag)
@app.route('/author/<display_name>')
def user(display_name):
user = Author.query.filter_by(display_name=display_name).first_or_404()
return render_template('author.html', author=user)
@app.route('/add', methods=['GET', 'POST'])
@login_required
def add():
form = PostForm()
if form.validate_on_submit():
title = form.title.data
short_desc = form.short_desc.data
body = form.body.data
tags = form.tags.data
published = form.published.data
post = Post(author=current_user, title=title, display_title=title, short_desc=short_desc, body=body, tags=tags,
published=published)
with db.session.no_autoflush:
db.session.add(post)
db.session.commit()
return redirect(url_for('index'))
return render_template('post_form.html', form=form)
@app.route('/edit')
@login_required
def edit():
posts = Post.query.filter(Post.author_id == current_user.id).order_by(Post.date.desc()).all()
return render_template('edit_list.html', posts=posts)
@app.route('/edit/<int:post_id>', methods=['GET', 'POST'])
@login_required
def edit_post(post_id):
post = Post.query.get_or_404(post_id)
if current_user != post.author:
abort(403)
form = PostForm(obj=post, post_id=post.id)
if form.validate_on_submit():
form.populate_obj(post)
db.session.commit()
return redirect(url_for('index'))
return render_template('post_form.html', form=form)
@app.route('/preview', methods=['GET', 'POST'])
@login_required
def preview_post():
result = request.get_json(force=True)
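    # `result` is expected to be a serialized HTML form, i.e. a list of
    # {"name": ..., "value": ...} dicts (for example, jQuery serializeArray()
    # output); this shape is inferred from the loop below.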
form_data = dict()
form_data['date'] = dt.utcnow()
form_data['author'] = current_user
for field in result:
form_data[field['name']] = field['value']
form_data['tags'] = form_data.get('tags').split(',')
return render_template('post_preview.html', post=form_data)
# MAIN OTHER###########
@app.errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@app.errorhandler(404)  # blueprintname.app_errorhandler will register for the entire app when using blueprints
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(e):
app.logger.error('Server Error: {}'.format(e))
return render_template('500.html'), 500
@app.context_processor
def inject_tags():
"""context_processor similar to the app_context_processor for blueprints"""
return dict(all_tags=Tag.all, tags_count=Tag.tag_count)
@app.context_processor
def inject_recent_posts():
"""context_processor similar to the app_context_processor for blueprints for recent posts"""
return dict(recent_posts=Post.recent)
@app.context_processor
def inject_auth_url():
return dict(auth_url=login_url)
@app.template_filter('strftime')
def _jinja2_filter_datetime(date, fmt=None):
if fmt is None:
fmt = '%Y-%m-%d'
return date.strftime(fmt)
| 33.647799 | 119 | 0.65271 | 0 | 0 | 0 | 0 | 4,900 | 0.915888 | 0 | 0 | 837 | 0.156449 |
055b1e351a5242b821e047dfcb5c1f7591a3c693
| 509 |
py
|
Python
|
id.py
|
txkodo/pyDatapack
|
f647e0043d09e3d456a8019fb00cb945c0d6b6a7
|
[
"MIT"
] | null | null | null |
id.py
|
txkodo/pyDatapack
|
f647e0043d09e3d456a8019fb00cb945c0d6b6a7
|
[
"MIT"
] | null | null | null |
id.py
|
txkodo/pyDatapack
|
f647e0043d09e3d456a8019fb00cb945c0d6b6a7
|
[
"MIT"
] | null | null | null |
import string
import secrets
def gen_objective_id():
return ''.join(secrets.choice(string.ascii_lowercase + string.digits) for _ in range(16))
def gen_scoreholder_id():
return ''.join(secrets.choice(string.ascii_lowercase + string.digits) for _ in range(16))
def gen_function_id():
return ''.join(secrets.choice(string.ascii_lowercase + string.digits) for _ in range(16))
def gen_datapath_id():
return ''.join(secrets.choice(string.ascii_lowercase + string.digits) for _ in range(16))
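# The four generators above are identical apart from their names; a shared
# helper would keep them in sync (a sketch, not part of the original module):
#
# def _gen_id(length=16):
#     alphabet = string.ascii_lowercase + string.digits
#     return ''.join(secrets.choice(alphabet) for _ in range(length))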
| 36.357143 | 92 | 0.740668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.015717 |
055c22d5891f38a9238c8713208320ff8c57d8d5
| 185 |
py
|
Python
|
bot/states/states.py
|
amtp1/ubi-4
|
bbfa07f0936960058d7f282b1c83be7150494dc1
|
[
"BSD-3-Clause"
] | null | null | null |
bot/states/states.py
|
amtp1/ubi-4
|
bbfa07f0936960058d7f282b1c83be7150494dc1
|
[
"BSD-3-Clause"
] | null | null | null |
bot/states/states.py
|
amtp1/ubi-4
|
bbfa07f0936960058d7f282b1c83be7150494dc1
|
[
"BSD-3-Clause"
] | null | null | null |
from aiogram.dispatcher.filters.state import StatesGroup, State
class Attack(StatesGroup):
set_phone_call = State()
class Mailing(StatesGroup):
set_mailing_text_call = State()
| 26.428571 | 63 | 0.783784 | 118 | 0.637838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
055c45d3bc0e2eb761a389c587de2205205755a0
| 255 |
py
|
Python
|
apps/user/urls.py
|
dimastbk/x-one_test
|
aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc
|
[
"MIT"
] | 1 |
2020-08-10T11:46:17.000Z
|
2020-08-10T11:46:17.000Z
|
apps/user/urls.py
|
dimastbk/x-one_test
|
aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc
|
[
"MIT"
] | null | null | null |
apps/user/urls.py
|
dimastbk/x-one_test
|
aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc
|
[
"MIT"
] | null | null | null |
from rest_framework.routers import DefaultRouter
from apps.user.views import AuthViewSet, UserViewSet
router = DefaultRouter()
router.register("", AuthViewSet, basename="auth")
router.register("", UserViewSet, basename="user")
urlpatterns = router.urls
| 28.333333 | 52 | 0.796078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.062745 |
055c91bef8da3c2b5ab9913ec9ae41927e8fef83
| 1,514 |
py
|
Python
|
evkit/utils/misc.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 120 |
2019-04-22T04:45:28.000Z
|
2022-03-23T01:53:17.000Z
|
evkit/utils/misc.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 14 |
2019-06-12T08:21:21.000Z
|
2021-08-25T15:36:58.000Z
|
evkit/utils/misc.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 19 |
2019-06-19T07:00:36.000Z
|
2022-03-24T07:18:30.000Z
|
import collections
import collections.abc
import re
import torch
import pprint
import string
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def is_interactive():
try:
ip = get_ipython()
return ip.has_trait('kernel')
except:
return False
def is_cuda(model):
return next(model.parameters()).is_cuda
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
self._keys, self._vals = zip(*adict.items())
self._keys, self._vals = list(self._keys), list(self._vals)
def keys(self):
return self._keys
def vals(self):
return self._vals
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
def update_dict_deepcopy(d, u): # we need a deep dictionary update
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
d[k] = update_dict_deepcopy(d.get(k, {}), v)
else:
d[k] = v
return d
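# Example (hypothetical dicts):
#   update_dict_deepcopy({"a": {"x": 1}}, {"a": {"y": 2}})
# returns {"a": {"x": 1, "y": 2}} - nested mappings are merged recursively
# instead of being replaced wholesale.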
| 22.939394 | 110 | 0.579921 | 302 | 0.199472 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.174373 |
055cc455230997c5276c879e8d734a4e3c932b7e
| 1,652 |
py
|
Python
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 3 |
2021-10-16T01:28:24.000Z
|
2021-12-07T21:49:54.000Z
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 12 |
2021-05-09T16:57:18.000Z
|
2021-06-16T19:20:57.000Z
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import unittest
import time
import usb.util
from g13gui.observer.observer import ObserverTestCase
from g13gui.model.prefs import Preferences
from g13gui.g13.manager import DeviceManager
from g13gui.g13.manager import LCD_BUFFER_SIZE
class DeviceManagerTests(ObserverTestCase):
def setUp(self):
prefs = Preferences()
self.m = DeviceManager(prefs)
self.m.start()
while self.m.state != DeviceManager.State.FOUND:
time.sleep(1)
self.assertEqual(self.m.state, DeviceManager.State.FOUND)
def tearDown(self):
self.m.shutdown()
self.m.join()
def testLeds(self):
for i in range(0, 17):
self.m.setLedsMode(i)
def testBacklight(self):
for i in range(0, 256):
self.m.setBacklightColor(i, 0, 0)
for i in range(0, 256):
self.m.setBacklightColor(0, i, 0)
for i in range(0, 256):
self.m.setBacklightColor(0, 0, i)
for i in range(0, 256):
self.m.setBacklightColor(i, i, 0)
for i in range(0, 256):
self.m.setBacklightColor(0, i, i)
for i in range(0, 256):
self.m.setBacklightColor(i, 0, i)
for i in range(0, 256):
self.m.setBacklightColor(i, i, i)
def testLCD(self):
whiteBuffer = [0x5A] * LCD_BUFFER_SIZE
blackBuffer = [0xA5] * LCD_BUFFER_SIZE
for i in range(1, 10):
self.m.setLCDBuffer(whiteBuffer)
time.sleep(0.5)
self.m.setLCDBuffer(blackBuffer)
time.sleep(0.5)
if __name__ == '__main__':
unittest.main()
| 24.656716 | 65 | 0.598668 | 1,347 | 0.815375 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.016344 |
055df8a4d5bc728dd507e18c15a01996fcd7eeb9
| 754 |
py
|
Python
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 2 |
2018-11-12T12:17:27.000Z
|
2019-02-08T15:44:14.000Z
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 3 |
2018-08-03T12:05:20.000Z
|
2018-08-03T12:13:53.000Z
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 4 |
2019-01-21T16:31:34.000Z
|
2019-12-03T09:27:15.000Z
|
import socket
import logging
log = logging.getLogger('mpikat.utils.unix_socket')
class UDSClient(object):
def __init__(self, socket_name):
self._socket_name = socket_name
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self._sock.connect(self._socket_name)
except Exception:
log.exception("Unable to connect to Unix domain socket {}".format(
self._socket_name))
self._sock.settimeout(2)
def close(self):
self._sock.close()
def send(self, message):
message += "\r\n"
self._sock.sendall(message)
def recv(self, maxsize=8192, timeout=2):
        self._sock.settimeout(timeout)
return self._sock.recv(maxsize)
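# Usage sketch (the socket path is a placeholder):
#
#   client = UDSClient("/tmp/mpikat.sock")
#   client.send("status?")
#   reply = client.recv(timeout=2)
#   client.close()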
| 26.928571 | 78 | 0.635279 | 669 | 0.887268 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.100796 |
0560a6e08907adcfebf943f18a20892cd59deb17
| 311 |
py
|
Python
|
Exercises/python/Cod1/cod1.py
|
Rick222555000/Princess
|
29c4c22351eeb8f2124ffe63632351fa373668e8
|
[
"MIT"
] | null | null | null |
Exercises/python/Cod1/cod1.py
|
Rick222555000/Princess
|
29c4c22351eeb8f2124ffe63632351fa373668e8
|
[
"MIT"
] | null | null | null |
Exercises/python/Cod1/cod1.py
|
Rick222555000/Princess
|
29c4c22351eeb8f2124ffe63632351fa373668e8
|
[
"MIT"
] | null | null | null |
#1- Write a program that reads 3 integers A, B and C and prints a message saying whether R=(A+B)/C is greater than B or not.
A, B, C = int(input()), int(input()), int(input())
R = (A + B)/C
def Maior(R, B):
if (R > B):
        return 'R is greater than B.'
else:
        return 'R is not greater than B.'
print(Maior(R, B))
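# Example run (hypothetical input): A=2, B=4, C=3 gives R = (2+4)/3 = 2.0,
# so the program prints 'R is not greater than B.'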
| 25.916667 | 121 | 0.59164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.526814 |
0560aa251cb9f57348aa3861ec51b4ed5e27e782
| 1,021 |
py
|
Python
|
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.files.storage import FileSystemStorage
from django.conf import settings
import os
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
class StrToList:
    @staticmethod
    def strtolist(string):
'''
Transforms the string stored by Prepross model to list
'''
to_rem = ['[',
']',
'[]',
',']
string = string.replace(" ", "").split("'")
for i in to_rem:
try:
string = list(filter((i).__ne__, string))
except:
pass
return string
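# Example (hypothetical stored value):
#   StrToList.strtolist("['a', 'b']") returns ['a', 'b'] - the brackets,
#   commas and whitespace are stripped and the quoted items remain.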
| 30.029412 | 84 | 0.539667 | 911 | 0.892262 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.313418 |
0561ca0895bec1e2984e237afabbb565849b0693
| 140 |
py
|
Python
|
room/services.py
|
F4ever/dots
|
b893802a78d4c8c1054a2c75c80dc1dd27d51eac
|
[
"Apache-2.0"
] | null | null | null |
room/services.py
|
F4ever/dots
|
b893802a78d4c8c1054a2c75c80dc1dd27d51eac
|
[
"Apache-2.0"
] | 6 |
2020-06-05T20:10:02.000Z
|
2021-09-22T18:09:52.000Z
|
room/services.py
|
F4ever/dots
|
b893802a78d4c8c1054a2c75c80dc1dd27d51eac
|
[
"Apache-2.0"
] | null | null | null |
class RoomCalculationService:
def __init__(self, room_id):
self.room_id = room_id
def calculate_results(self):
pass
| 23.333333 | 32 | 0.678571 | 140 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
05622f786bb071a97ceb1da54cab05760a5a36c8
| 624 |
py
|
Python
|
classes.py
|
thepfanner/CrisisComABM
|
919ab45ad522ec82806a6dff8ef8807a88e398d0
|
[
"MIT"
] | 1 |
2017-03-31T01:48:07.000Z
|
2017-03-31T01:48:07.000Z
|
classes.py
|
thepfanner/CrisisComABM
|
919ab45ad522ec82806a6dff8ef8807a88e398d0
|
[
"MIT"
] | null | null | null |
classes.py
|
thepfanner/CrisisComABM
|
919ab45ad522ec82806a6dff8ef8807a88e398d0
|
[
"MIT"
] | null | null | null |
__author__ = 'sp'
class Location:
def __init__(self, x, y):
self.x = x
self.y = y
class Location_ID:
def __init__(self, id, direct, x, y, range):
self.id = id
self.direct = direct
self.x = x
self.y = y
self.range = range
class Products():
def __init__(self, a, b):
self.a = a
self.b = b
class Circle():
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
class Network_Range_Circle(Circle):
def __init__(self, x=0, y=0, r=0, id=0):
Circle.__init__(self, x, y, r)
self.id = id
| 19.5 | 48 | 0.517628 | 595 | 0.953526 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.00641 |
0564823c9e294186f86aee5daa972c4a2f49f3f0
| 2,400 |
py
|
Python
|
app.py
|
saty2146/flask_api_log
|
760ac901b310649fe5dc98c6a8bdd0fdb5883a82
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
saty2146/flask_api_log
|
760ac901b310649fe5dc98c6a8bdd0fdb5883a82
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
saty2146/flask_api_log
|
760ac901b310649fe5dc98c6a8bdd0fdb5883a82
|
[
"Apache-2.0"
] | null | null | null |
#!venv/bin/python
import os, re, json
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
from flask import render_template
from boxes import *
app = Flask(__name__)
api = Api(app)
def get_logs(date, severity, box):
DATESTAMP_RE = r'(\w+\s+\d+)'
TIMESTAMP_RE = r'(\d+:\d+:\d+)'
DEVICE_IP_RE = r'(\S+)'
ERROR_SEVERITY_RE = r'<local7.(\S+)>'
ERROR_CODE_RE = r':\s+%(\S+):'
ERROR_MESSAGE_RE = r'(.*)'
COLUMN_DELIMITER_RE = r'\s+'
PADDING_RE = r'\s?(?:\d+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\w+)?'
keys = ['datestamp', 'timestamp', 'error_severity', 'device', 'error_code', 'error_message']
result_log = []
datestamp = str(date)
year = datestamp[0:4]
month = datestamp[4:6]
day = datestamp[-2:]
filename = datestamp + '.log'
    ip_address_list = [ip for ip, name in boxes.items() if name == box]
ip_address = ip_address_list[0]
workdir = '/var/log/cisco/' + year + '/' + month + '/' + ip_address + '/'
filepath = workdir + filename
SYSLOG_RE = (
DATESTAMP_RE + COLUMN_DELIMITER_RE +
TIMESTAMP_RE + COLUMN_DELIMITER_RE +
ERROR_SEVERITY_RE + COLUMN_DELIMITER_RE +
DEVICE_IP_RE + COLUMN_DELIMITER_RE + PADDING_RE +
ERROR_CODE_RE + COLUMN_DELIMITER_RE +
ERROR_MESSAGE_RE)
if os.path.isfile(filepath):
with open(filepath, mode = 'r') as syslog:
log_lines = syslog.readlines()
for line in reversed(log_lines):
matched = re.match(SYSLOG_RE, line)
if not matched:
continue
values = matched.groups()
result = dict(zip(keys, values))
result['device'] = boxes[result['device']]
if severity == 'all':
result_log.append(result)
elif result['error_severity'] == severity and result['device'] == box:
result_log.append(result)
else:
pass
return result_log
class Syslog(Resource):
def get(self):
date = request.args.get('date')
severity = request.args.get('severity')
box = request.args.get('box')
log = {"data": get_logs(date, severity, box)}
#return render_template('logs.html', logs = logs)
return jsonify(log)
api.add_resource(Syslog, '/syslog') # Route_1
if __name__ == '__main__':
app.run(host="217.73.28.16", port=5002)
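# Example query (hypothetical date, severity and box name):
#   GET http://217.73.28.16:5002/syslog?date=20180101&severity=err&box=core-sw1
# returns {"data": [...]} with one dict per matching syslog line.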
| 30.379747 | 96 | 0.617917 | 310 | 0.129167 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.173333 |
056594b9b59d36dfeef52d15b7455e3dcb8e0bf9
| 1,362 |
py
|
Python
|
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | null | null | null |
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | null | null | null |
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | 1 |
2021-04-13T20:02:14.000Z
|
2021-04-13T20:02:14.000Z
|
#!/usr/bin/env python3
import boto.utils, json, requests
def detect_ec2():
try:
        r = requests.get('http://169.254.169.254/latest/meta-data/ami-id', timeout=2)
        # Check the response instead of assuming any answer means EC2.
        return r.status_code == 200 and r.text.startswith('ami-')
except:
return False
def gen_link():
s = json.dumps({'sessionId': boto.utils.get_instance_metadata()['identity-credentials']['ec2']['security-credentials']['ec2-instance']['AccessKeyId'],
'sessionKey': boto.utils.get_instance_metadata()['identity-credentials']['ec2']['security-credentials']['ec2-instance']['SecretAccessKey'],
'sessionToken': boto.utils.get_instance_metadata()['identity-credentials']['ec2']['security-credentials']['ec2-instance']['Token']})
r = requests.get("https://signin.aws.amazon.com/federation", params={'Action': 'getSigninToken', 'SessionDuration': 7200, 'Session': s})
t = r.json()
rs = requests.Request('GET', 'https://signin.aws.amazon.com/federation',
params={'Action': 'login', 'Issuer': 'Internet Widgets Pty.', 'Destination': 'https://console.aws.amazon.com/', 'SigninToken': t['SigninToken']})
l = rs.prepare()
return l.url
if detect_ec2():
print(gen_link())
else:
print("This is not an AWS instance. Please run on an AWS EC2 instance.")
| 41.272727 | 175 | 0.642438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 715 | 0.524963 |
0565ccb5f3f8b36de113f3a2bcbbc32675fef341
| 58,839 |
py
|
Python
|
pysnmp-with-texts/FORCE10-MONITORING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8 |
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/FORCE10-MONITORING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4 |
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/FORCE10-MONITORING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module FORCE10-MONITORING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FORCE10-MONITORING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:14:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
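# NOTE: `mibBuilder` is not defined in this file; pysmi-generated modules are
# executed by the pysnmp MIB loader, which injects `mibBuilder` into the
# module namespace before the code below runs.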
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
f10Mgmt, = mibBuilder.importSymbols("FORCE10-SMI", "f10Mgmt")
F10VlanID, F10CycloneVersion, F10ProcessorModuleType, F10PortPipeID, F10QueueID, F10SlotID = mibBuilder.importSymbols("FORCE10-TC", "F10VlanID", "F10CycloneVersion", "F10ProcessorModuleType", "F10PortPipeID", "F10QueueID", "F10SlotID")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter32, Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Bits, IpAddress, TimeTicks, Unsigned32, MibIdentifier, ObjectIdentity, NotificationType, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Bits", "IpAddress", "TimeTicks", "Unsigned32", "MibIdentifier", "ObjectIdentity", "NotificationType", "Counter64")
DisplayString, TextualConvention, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress", "TruthValue")
f10MonitoringMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 6027, 3, 3))
f10MonitoringMib.setRevisions(('2008-12-18 12:00', '1906-01-20 00:00', '2000-11-02 10:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: f10MonitoringMib.setRevisionsDescriptions(('Force10 Monitoring MIB version 1.3 Added CPU Ingress Queue Unicast Statistics table. ', 'Force10 Monitoring MIB version 1.2 Added IP and ARP statistic objects that are not available in RFC1213. ', 'Force10 Monitoring MIB version 1.1',))
if mibBuilder.loadTexts: f10MonitoringMib.setLastUpdated('200812181200Z')
if mibBuilder.loadTexts: f10MonitoringMib.setOrganization('Force10 Networks, Inc.')
if mibBuilder.loadTexts: f10MonitoringMib.setContactInfo('Force10 Networks, Inc 1440 McCarthy Blvd Milpitas, CA 95035 (408) 571-3500 [email protected] http://www.force10networks.com')
if mibBuilder.loadTexts: f10MonitoringMib.setDescription('Force10 Monitoring MIB provides statistics and accounting for various Force10 products. ')
f10MonGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 1))
f10MonQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2))
f10MonMac = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3))
f10MonIfQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4))
f10NetworkStat = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5))
f10IpStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1))
f10ArpStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2))
f10MonMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("version1", 1), ("version1dot1", 2), ("version1dot2", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MonMibVersion.setStatus('current')
if mibBuilder.loadTexts: f10MonMibVersion.setDescription(' version1(1) - initial version, define QOS Queue Statistics table. version1dot1(2) - support MAC Accounting (f10MonMac). version1dot2(3) - support Interface Queue Statistics Tables (f10MonIfQueue). ')
f10MonQueueGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 1))
f10MonMaxQueue = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MonMaxQueue.setStatus('current')
if mibBuilder.loadTexts: f10MonMaxQueue.setDescription('The maximum number of Force10 QOS queue supported by Force10 Interfaces. ')
f10InQueueStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2), )
if mibBuilder.loadTexts: f10InQueueStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10InQueueStatisticsTable.setDescription('The Force10 QOS Input Queue Statistics Table. This table provides Input Queue statistics for Force10 Interfaces. ')
f10InQueueStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10InQueueId"))
if mibBuilder.loadTexts: f10InQueueStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10InQueueStatisticsEntry.setDescription('An entry in the Force10 QOS Input Queue table. The Input Queue Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the Queue ID should be a valid Force10 Queue ID. ')
f10InQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueId.setStatus('current')
if mibBuilder.loadTexts: f10InQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
f10InQueueDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10InQueueDropPackets.setDescription(' ')
f10InQueueBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueBytes.setStatus('current')
if mibBuilder.loadTexts: f10InQueueBytes.setDescription(' ')
f10InQueueMatchPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchPackets.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchPackets.setDescription(' ')
f10InQueueMatchBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchBytes.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchBytes.setDescription(' ')
f10InQueueMatchBps = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchBps.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchBps.setDescription(' ')
f10InQueueCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 7), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10InQueueCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10InQueueBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10InQueueBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
f10InQueuePktsCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueuePktsCount.setStatus('current')
if mibBuilder.loadTexts: f10InQueuePktsCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
f10OutQueueStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3), )
if mibBuilder.loadTexts: f10OutQueueStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueStatisticsTable.setDescription('The Force10 QOS Output Queue Statistics Table. This table provides Output Queue statistics for Force10 Interfaces. ')
f10OutQueueStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10OutQueueId"))
if mibBuilder.loadTexts: f10OutQueueStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueStatisticsEntry.setDescription('An entry in the Output Queue table. The Output Queue Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the the Queue ID should be a valid Force10 Queue ID. ')
f10OutQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueId.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
f10OutQueuePackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueuePackets.setStatus('current')
if mibBuilder.loadTexts: f10OutQueuePackets.setDescription(' ')
f10OutQueueBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBytes.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10OutQueueBps = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBps.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBps.setDescription(' ')
f10OutQueueCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 5), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10OutQueueBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
f10WredStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4), )
if mibBuilder.loadTexts: f10WredStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10WredStatisticsTable.setDescription('QOS WRED Statistics Table This table provides QOS WRED statistics for the Force10 Interfaces. ')
f10WredStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10WredQueueId"))
if mibBuilder.loadTexts: f10WredStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10WredStatisticsEntry.setDescription('An entry in the WRED Statistics table. The WRED Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the Queue ID should be a valid Force10 Queue ID. ')
f10WredQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredQueueId.setStatus('current')
if mibBuilder.loadTexts: f10WredQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
f10WredGreenName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenName.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenName.setDescription(' ')
f10WredGreenThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenThresholdLow.setDescription(' ')
f10WredGreenThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenThresholdHigh.setDescription(' ')
f10WredGreenDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenDropPackets.setDescription(' ')
f10WredGreenReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenReserve1.setDescription(' ')
f10WredGreenReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenReserve2.setDescription(' ')
f10WredYellowName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowName.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowName.setDescription(' ')
f10WredYellowThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowThresholdLow.setDescription(' ')
f10WredYellowThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowThresholdHigh.setDescription(' ')
f10WredYellowDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowDropPackets.setDescription(' ')
f10WredYellowReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowReserve1.setDescription(' ')
f10WredYellowReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowReserve2.setDescription(' ')
f10WredRedName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedName.setStatus('current')
if mibBuilder.loadTexts: f10WredRedName.setDescription(' ')
f10WredRedThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredRedThresholdLow.setDescription(' ')
f10WredRedThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredRedThresholdHigh.setDescription(' ')
f10WredRedDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredRedDropPackets.setDescription(' ')
f10WredRedReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredRedReserve1.setDescription(' ')
f10WredRedReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredRedReserve2.setDescription(' ')
f10MacGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 1))
f10MacAccounting = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2))
f10MacAccountingDestTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1), )
if mibBuilder.loadTexts: f10MacAccountingDestTable.setStatus('current')
if mibBuilder.loadTexts: f10MacAccountingDestTable.setDescription('The MAC Accounting Destination Table. Each entry in the table provides the MAC accounting statistics from a specific Interface, VLAN ID, and the desired destination MAC Address. ')
f10MacAccountingDestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10MacAccInIfIndex"), (0, "FORCE10-MONITORING-MIB", "f10MacAccVlan"), (0, "FORCE10-MONITORING-MIB", "f10MacAccMacAddr"))
if mibBuilder.loadTexts: f10MacAccountingDestEntry.setStatus('current')
if mibBuilder.loadTexts: f10MacAccountingDestEntry.setDescription('An entry in the MAC Accounting Destination Table. The MAC Accounting Destination table is indexed by the input Interface, VLAN ID, and the destination MAC Address. ')
f10MacAccInIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccInIfIndex.setStatus('current')
if mibBuilder.loadTexts: f10MacAccInIfIndex.setDescription('The input Interface of this entry of the table. The value should be a valid ifIndex in the MIB II Interface Table. ')
f10MacAccVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 2), F10VlanID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccVlan.setStatus('current')
if mibBuilder.loadTexts: f10MacAccVlan.setDescription('The VLAN ID. ')
f10MacAccMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccMacAddr.setStatus('current')
if mibBuilder.loadTexts: f10MacAccMacAddr.setDescription("The MAC Address that identifies this entry of the table. This is the destination MAC Address of the packets that's going through the Interface identified by f10MacAccInIfIndex. ")
f10MacAccOutIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccOutIfIndex.setStatus('current')
if mibBuilder.loadTexts: f10MacAccOutIfIndex.setDescription('The output Interface of this entry of the table. The value should be a valid ifIndex in the MIB II Interface Table. ')
f10MacAccPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccPackets.setStatus('current')
if mibBuilder.loadTexts: f10MacAccPackets.setDescription('The number of packets going through this entry of the the table, identified by the Interface/MAC/VLAN. ')
f10MacAccBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccBytes.setStatus('current')
if mibBuilder.loadTexts: f10MacAccBytes.setDescription('The number of bytes traffic going through this entry of the table, identified by the Interface/MAC/VLAN. ')
f10MonIfQueueGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 1))
f10IngQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2), )
if mibBuilder.loadTexts: f10IngQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueUnicastStatTable.setDescription('The Ingress Queue Unicast Statistics Table. This table provides Queue statistics for Ingress Unicast packets between Force10 linecards. ')
f10IngQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10IngUnicastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastDestCard"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastDestPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastQueueId"))
if mibBuilder.loadTexts: f10IngQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueUnicastStatEntry.setDescription('An entry in the Ingress Queue Unicast Statistics table. The Ingress Queue Unicast Statistics Table is indexed by the source and destination linecard/portpipe and Queue ID. ')
f10IngUnicastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 1), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry. ')
f10IngUnicastDestCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 2), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastDestCard.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastDestCard.setDescription('This is the destination linecard number. This is the 3rd index of this table entry. ')
f10IngUnicastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 3), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard. This is the 2nd index of this table entry. ')
f10IngUnicastDestPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 4), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastDestPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastDestPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the destination linecard. This is the 4th index of this table entry. ')
f10IngUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 5), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 5th index of this table entry. ')
f10IngUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 6), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10IngUnicastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastBytes.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10IngUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10IngUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10IngUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
f10IngQueueMulticastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3), )
if mibBuilder.loadTexts: f10IngQueueMulticastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueMulticastStatTable.setDescription('The Ingress Queue Multicast Statistics Table. This table provides Queue statistics for Ingress Multicast packets at Force10 linecards. ')
f10IngQueueMulticastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10IngMulticastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10IngMulticastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngMulticastQueueId"))
if mibBuilder.loadTexts: f10IngQueueMulticastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueMulticastStatEntry.setDescription('An entry in the Ingress Queue Multicast Statistics table. The Ingress Queue Multicast Statistics Table is indexed by the source linecard/portpipe and Queue ID. ')
f10IngMulticastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 1), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry. ')
f10IngMulticastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 2), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard. This is the 2nd index of this table entry. ')
f10IngMulticastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 3), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastQueueId.setDescription('This is the Queue ID of this entry. This is the 3rd index of this table entry. ')
f10IngMulticastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 4), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10IngMulticastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastBytes.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10IngMulticastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngMulticastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngMulticastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngMulticastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngMulticastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10IngMulticastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngMulticastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occur. ')
f10IngMulticastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10IngMulticastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
f10EgQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4), )
if mibBuilder.loadTexts: f10EgQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueUnicastStatTable.setDescription('The Egress Queue Unicast Statistics Table. This table provides Queue statistics for Egress Unicast packets at Force10 Interface. ')
f10EgQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10EgUnicastQueueId"))
if mibBuilder.loadTexts: f10EgQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueUnicastStatEntry.setDescription('An entry in the Egress Queue Unicast Statistics table. The Egress Queue Unicast Statistics Table is indexed by the ifIndex and Queue ID. The IfIndex should be a valid Interface Index as defined in the RFC1213 MIB II Interface Table. ')
f10EgUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 2nd index of this table entry. ')
f10EgUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 2), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10EgUnicastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastBytes.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10EgUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastBytesCount.setDescription('The cumulative number of bytes of data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardware only. ')
f10EgUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardware only. ')
f10EgUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occur. ')
f10EgUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10EgUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occur. ')
f10EgUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10EgUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
f10EgQueueMulticastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5), )
if mibBuilder.loadTexts: f10EgQueueMulticastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueMulticastStatTable.setDescription('The Egress Queue Multicast Statistics Table. This table provides Queue statistics for Egress Multicast packets at Force10 Interface. ')
f10EgQueueMulticastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10EgMulticastQueueId"))
if mibBuilder.loadTexts: f10EgQueueMulticastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueMulticastStatEntry.setDescription('An entry in the Egress Queue Multicast Statistics table. The Egress Queue Multicast Statistics Table is indexed by the ifIndex and Queue ID. The IfIndex should be a valid Interface Index as defined in the RFC1213 MIB II Interface Table. ')
f10EgMulticastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastQueueId.setDescription('This is the Queue ID of this entry. This is the 2nd index of this table entry. ')
f10EgMulticastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 2), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10EgMulticastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastBytes.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10EgMulticastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastBytesCount.setDescription('The cumulative number of bytes of data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardware only. ')
f10EgMulticastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardware only. ')
f10EgMulticastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgMulticastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occur. ')
f10EgMulticastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10EgMulticastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgMulticastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occur. ')
f10EgMulticastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10EgMulticastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
f10CpuIngQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6), )
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatTable.setDescription('The CPU Ingress Queue Unicast Statistics Table. This table provides Queue statistics for Ingress Unicast packets destined for CPU.')
f10CpuIngQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastDestCpu"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastQueueId"))
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatEntry.setDescription('An entry in the CPU Ingress Queue Unicast Statistics Table. The CPU Ingress Queue Unicast Statistics Table is indexed by the source linecard/portpipe, cpu port and Queue ID.')
f10CpuIngUnicastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 1), F10SlotID())
if mibBuilder.loadTexts: f10CpuIngUnicastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry.')
f10CpuIngUnicastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 2), F10PortPipeID())
if mibBuilder.loadTexts: f10CpuIngUnicastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard. This is the 2nd index of this table entry.')
f10CpuIngUnicastDestCpu = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 3), F10ProcessorModuleType())
if mibBuilder.loadTexts: f10CpuIngUnicastDestCpu.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastDestCpu.setDescription('This is the destination CPU port of this entry. This is the 3rd index of this table entry.')
f10CpuIngUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 4), F10QueueID())
if mibBuilder.loadTexts: f10CpuIngUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 4th index of this table entry.')
f10CpuIngUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 5), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version.')
f10CpuIngUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastBytesCount.setDescription('The cumulative number of bytes of data passing through this queue. This object is available on Cyclone version 3.0 (X3) hardware only.')
f10CpuIngUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 3.0 (X3) hardware only.')
f10CpuIngUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability.')
f10CpuIngUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occur.')
f10CpuIngUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue.')
f10CpuIngUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability.')
f10CpuIngUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occur.')
f10CpuIngUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue.')
f10CpuIngUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue.')
f10BcastPktRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10BcastPktRecv.setStatus('current')
if mibBuilder.loadTexts: f10BcastPktRecv.setDescription('The total number of broadcast packets received. ')
f10BcastPktSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10BcastPktSent.setStatus('current')
if mibBuilder.loadTexts: f10BcastPktSent.setDescription('The total number of broadcast packets sent. ')
f10McastPktRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10McastPktRecv.setStatus('current')
if mibBuilder.loadTexts: f10McastPktRecv.setDescription('The total number of multicast packets received. ')
f10McastPktSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10McastPktSent.setStatus('current')
if mibBuilder.loadTexts: f10McastPktSent.setDescription('The total number of multicast packets sent. ')
f10ArpReqRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReqRecv.setStatus('current')
if mibBuilder.loadTexts: f10ArpReqRecv.setDescription('The total number of ARP requests received. ')
f10ArpReqSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReqSent.setStatus('current')
if mibBuilder.loadTexts: f10ArpReqSent.setDescription('The total number of ARP requests sent. ')
f10ArpReplyRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReplyRecv.setStatus('current')
if mibBuilder.loadTexts: f10ArpReplyRecv.setDescription('The total number of ARP replies received. ')
f10ArpReplySent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReplySent.setStatus('current')
if mibBuilder.loadTexts: f10ArpReplySent.setDescription('The total number of ARP replies sent. ')
f10ArpProxySent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpProxySent.setStatus('current')
if mibBuilder.loadTexts: f10ArpProxySent.setDescription('The total number of proxy ARP replies sent. ')
mibBuilder.exportSymbols("FORCE10-MONITORING-MIB", f10CpuIngUnicastYellowMin=f10CpuIngUnicastYellowMin, f10EgMulticastBytesCount=f10EgMulticastBytesCount, f10OutQueueStatisticsTable=f10OutQueueStatisticsTable, f10MacAccBytes=f10MacAccBytes, f10InQueueCycloneVersion=f10InQueueCycloneVersion, f10IngUnicastQueueId=f10IngUnicastQueueId, f10MacAccPackets=f10MacAccPackets, f10CpuIngUnicastSrcCard=f10CpuIngUnicastSrcCard, f10WredYellowThresholdLow=f10WredYellowThresholdLow, f10WredStatisticsEntry=f10WredStatisticsEntry, f10CpuIngUnicastPacketCount=f10CpuIngUnicastPacketCount, f10WredRedName=f10WredRedName, f10IngMulticastCycloneVersion=f10IngMulticastCycloneVersion, f10EgMulticastRedDrop=f10EgMulticastRedDrop, f10EgQueueMulticastStatEntry=f10EgQueueMulticastStatEntry, f10WredGreenThresholdLow=f10WredGreenThresholdLow, f10EgMulticastCycloneVersion=f10EgMulticastCycloneVersion, f10InQueueMatchBytes=f10InQueueMatchBytes, f10EgMulticastYellowMax=f10EgMulticastYellowMax, f10MonQueueGroup=f10MonQueueGroup, f10OutQueueBytes=f10OutQueueBytes, f10EgUnicastBytes=f10EgUnicastBytes, f10IngUnicastSrcCard=f10IngUnicastSrcCard, f10ArpStatistic=f10ArpStatistic, f10InQueueBytesCount=f10InQueueBytesCount, f10EgUnicastCycloneVersion=f10EgUnicastCycloneVersion, f10EgMulticastPacketCount=f10EgMulticastPacketCount, f10OutQueueStatisticsEntry=f10OutQueueStatisticsEntry, f10CpuIngUnicastGreenDrop=f10CpuIngUnicastGreenDrop, f10IngMulticastYellowMax=f10IngMulticastYellowMax, f10IngQueueMulticastStatTable=f10IngQueueMulticastStatTable, f10BcastPktRecv=f10BcastPktRecv, f10CpuIngQueueUnicastStatEntry=f10CpuIngQueueUnicastStatEntry, f10EgUnicastGreenMax=f10EgUnicastGreenMax, f10IngMulticastYellowMin=f10IngMulticastYellowMin, f10InQueueMatchBps=f10InQueueMatchBps, f10InQueueStatisticsEntry=f10InQueueStatisticsEntry, f10CpuIngUnicastGreenMax=f10CpuIngUnicastGreenMax, f10WredRedReserve1=f10WredRedReserve1, f10WredRedReserve2=f10WredRedReserve2, f10IngMulticastQueueId=f10IngMulticastQueueId, f10EgMulticastYellowDrop=f10EgMulticastYellowDrop, f10InQueueDropPackets=f10InQueueDropPackets, f10OutQueuePackets=f10OutQueuePackets, f10IngUnicastYellowMax=f10IngUnicastYellowMax, f10EgQueueMulticastStatTable=f10EgQueueMulticastStatTable, f10CpuIngUnicastSrcPortPipe=f10CpuIngUnicastSrcPortPipe, f10CpuIngUnicastBytesCount=f10CpuIngUnicastBytesCount, f10InQueuePktsCount=f10InQueuePktsCount, f10IngMulticastSrcPortPipe=f10IngMulticastSrcPortPipe, f10EgUnicastPacketCount=f10EgUnicastPacketCount, f10IngMulticastGreenMin=f10IngMulticastGreenMin, f10EgQueueUnicastStatEntry=f10EgQueueUnicastStatEntry, f10CpuIngUnicastQueueId=f10CpuIngUnicastQueueId, f10MonQueue=f10MonQueue, f10EgMulticastGreenDrop=f10EgMulticastGreenDrop, f10IngUnicastGreenMin=f10IngUnicastGreenMin, f10IngQueueUnicastStatEntry=f10IngQueueUnicastStatEntry, f10IngMulticastBytesCount=f10IngMulticastBytesCount, f10OutQueueBps=f10OutQueueBps, f10IngMulticastSrcCard=f10IngMulticastSrcCard, f10WredYellowName=f10WredYellowName, f10MonMac=f10MonMac, f10WredYellowReserve1=f10WredYellowReserve1, f10InQueueBytes=f10InQueueBytes, f10MonMibVersion=f10MonMibVersion, f10ArpProxySent=f10ArpProxySent, f10ArpReplySent=f10ArpReplySent, f10MacAccOutIfIndex=f10MacAccOutIfIndex, f10BcastPktSent=f10BcastPktSent, f10IngUnicastCycloneVersion=f10IngUnicastCycloneVersion, f10EgUnicastRedDrop=f10EgUnicastRedDrop, f10InQueueStatisticsTable=f10InQueueStatisticsTable, f10WredStatisticsTable=f10WredStatisticsTable, f10OutQueueBytesCount=f10OutQueueBytesCount, f10IngUnicastBytes=f10IngUnicastBytes, 
f10CpuIngQueueUnicastStatTable=f10CpuIngQueueUnicastStatTable, f10CpuIngUnicastRedDrop=f10CpuIngUnicastRedDrop, f10IngUnicastYellowMin=f10IngUnicastYellowMin, f10InQueueId=f10InQueueId, f10MacAccounting=f10MacAccounting, f10MonIfQueueGroup=f10MonIfQueueGroup, f10ArpReqRecv=f10ArpReqRecv, f10IngMulticastPacketCount=f10IngMulticastPacketCount, f10IngUnicastGreenMax=f10IngUnicastGreenMax, f10IngMulticastYellowDrop=f10IngMulticastYellowDrop, PYSNMP_MODULE_ID=f10MonitoringMib, f10IngMulticastBytes=f10IngMulticastBytes, f10MonMaxQueue=f10MonMaxQueue, f10CpuIngUnicastDestCpu=f10CpuIngUnicastDestCpu, f10WredGreenName=f10WredGreenName, f10CpuIngUnicastYellowDrop=f10CpuIngUnicastYellowDrop, f10CpuIngUnicastGreenMin=f10CpuIngUnicastGreenMin, f10EgMulticastYellowMin=f10EgMulticastYellowMin, f10MonIfQueue=f10MonIfQueue, f10WredRedThresholdHigh=f10WredRedThresholdHigh, f10IngUnicastGreenDrop=f10IngUnicastGreenDrop, f10EgUnicastYellowMax=f10EgUnicastYellowMax, f10EgQueueUnicastStatTable=f10EgQueueUnicastStatTable, f10MacAccountingDestEntry=f10MacAccountingDestEntry, f10WredGreenDropPackets=f10WredGreenDropPackets, f10CpuIngUnicastYellowMax=f10CpuIngUnicastYellowMax, f10WredYellowReserve2=f10WredYellowReserve2, f10EgUnicastYellowDrop=f10EgUnicastYellowDrop, f10MacAccMacAddr=f10MacAccMacAddr, f10MacAccInIfIndex=f10MacAccInIfIndex, f10IpStatistic=f10IpStatistic, f10WredGreenThresholdHigh=f10WredGreenThresholdHigh, f10IngUnicastSrcPortPipe=f10IngUnicastSrcPortPipe, f10McastPktSent=f10McastPktSent, f10EgMulticastGreenMin=f10EgMulticastGreenMin, f10MonitoringMib=f10MonitoringMib, f10MonGroup=f10MonGroup, f10IngUnicastDestCard=f10IngUnicastDestCard, f10IngUnicastDestPortPipe=f10IngUnicastDestPortPipe, f10IngMulticastRedDrop=f10IngMulticastRedDrop, f10EgUnicastYellowMin=f10EgUnicastYellowMin, f10MacGroup=f10MacGroup, f10IngMulticastGreenDrop=f10IngMulticastGreenDrop, f10WredYellowDropPackets=f10WredYellowDropPackets, f10IngUnicastRedDrop=f10IngUnicastRedDrop, f10NetworkStat=f10NetworkStat, f10EgMulticastGreenMax=f10EgMulticastGreenMax, f10EgMulticastBytes=f10EgMulticastBytes, f10WredGreenReserve1=f10WredGreenReserve1, f10IngUnicastYellowDrop=f10IngUnicastYellowDrop, f10ArpReqSent=f10ArpReqSent, f10IngQueueUnicastStatTable=f10IngQueueUnicastStatTable, f10ArpReplyRecv=f10ArpReplyRecv, f10EgMulticastQueueId=f10EgMulticastQueueId, f10WredQueueId=f10WredQueueId, f10IngUnicastBytesCount=f10IngUnicastBytesCount, f10CpuIngUnicastCycloneVersion=f10CpuIngUnicastCycloneVersion, f10WredYellowThresholdHigh=f10WredYellowThresholdHigh, f10McastPktRecv=f10McastPktRecv, f10EgUnicastGreenMin=f10EgUnicastGreenMin, f10OutQueueId=f10OutQueueId, f10IngQueueMulticastStatEntry=f10IngQueueMulticastStatEntry, f10WredGreenReserve2=f10WredGreenReserve2, f10EgUnicastGreenDrop=f10EgUnicastGreenDrop, f10IngMulticastGreenMax=f10IngMulticastGreenMax, f10InQueueMatchPackets=f10InQueueMatchPackets, f10EgUnicastQueueId=f10EgUnicastQueueId, f10OutQueueCycloneVersion=f10OutQueueCycloneVersion, f10WredRedDropPackets=f10WredRedDropPackets, f10MacAccVlan=f10MacAccVlan, f10MacAccountingDestTable=f10MacAccountingDestTable, f10WredRedThresholdLow=f10WredRedThresholdLow, f10EgUnicastBytesCount=f10EgUnicastBytesCount, f10IngUnicastPacketCount=f10IngUnicastPacketCount)
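# ---------------------------------------------------------------------------
# Hedged usage sketch. This generated module is exec()'d by pysnmp's
# MibBuilder (which injects `mibBuilder`), so it cannot run standalone; a
# separate script could read one of the scalars defined above roughly like
# this, assuming FORCE10-MONITORING-MIB is resolvable on the engine's MIB
# search path and using placeholder agent details ('192.0.2.1', community
# 'public'):
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#       SnmpEngine(),
#       CommunityData('public', mpModel=1),      # SNMPv2c
#       UdpTransportTarget(('192.0.2.1', 161)),
#       ContextData(),
#       ObjectType(ObjectIdentity('FORCE10-MONITORING-MIB', 'f10BcastPktRecv', 0))))
#   if errorIndication:
#       print(errorIndication)
#   else:
#       for varBind in varBinds:
#           print(' = '.join(x.prettyPrint() for x in varBind))
# ---------------------------------------------------------------------------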
| 131.044543 | 6,796 | 0.791261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17,718 | 0.301127 |
056746e5dbf852638494e8c736e9cb3208ccd43b
| 1,964 |
py
|
Python
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 11 |
2021-06-02T03:46:52.000Z
|
2021-09-11T22:19:12.000Z
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 9 |
2021-06-14T07:46:20.000Z
|
2021-08-28T22:50:46.000Z
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 7 |
2021-06-01T11:59:36.000Z
|
2022-03-20T13:44:18.000Z
|
import numpy as np
from requests import session
from .core import CPUClient, GPUClient, HybridClient
from .temp import TempCPUWorker
from .errors import *
# Dump a client's attributes into a dictionary so that it can be used remotely.
def dump(c):
try:
return {
"_type": c.type,
"url": c.url,
"token": c.token,
"nickname": c.nickname,
"shard": c.shard if hasattr(c, 'shard') else None,
"start_id": str(c.start_id) if hasattr(c, 'start_id') else None,
"end_id": str(c.end_id) if hasattr(c, 'end_id') else None,
"shard_piece": c.shard_piece if hasattr(c, 'shard_piece') else None,
"wat": c.wat if hasattr(c, 'wat') else None,
"shards": c.shards if hasattr(c, 'shards') else None
}
except AttributeError as e:
raise DumpError(f"[crawling@home] unable to dump client: {e}")
# Load an existing client from its attributes. It's best to load from the output of dump(): `load(**dump(client))`
def load(_type=None, url=None, token=None, nickname=None, shard=None,
start_id=None, end_id=None, shard_piece=None, wat=None, shards=None):
if _type == "HYBRID":
c = HybridClient(*[None] * 2, _recycled=True)
elif _type == "CPU":
c = CPUClient(*[None] * 2, _recycled=True)
elif _type == "GPU":
c = GPUClient(*[None] * 2, _recycled=True)
elif _type == "FULLWAT":
c = TempCPUWorker(url, nickname, _recycled=True)
else:
raise ValueError(f"Invalid worker type: {_type}")
c.s = session()
c.type = _type
c.url = url
c.token = token
c.nickname = nickname
c.shard = shard
    # np.int64(None) raises, so guard recycled clients that have no shard range yet.
    c.start_id = None if start_id is None else np.int64(start_id)
    c.end_id = None if end_id is None else np.int64(end_id)
c.shard_piece = shard_piece
c.wat = wat
c.shards = shards
return c
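# ---------------------------------------------------------------------------
# Hedged usage sketch: the intended round trip is `load(**dump(client))`.
# Because this module uses relative imports it runs as part of the package
# (package name assumed from the repo), not as a standalone script, and a
# live client needs a reachable crawling@home server. The dict below mirrors
# the keys dump() emits, with placeholder values:
#
#   worker_state = {
#       "_type": "CPU",
#       "url": "http://example.invalid/",   # placeholder server URL
#       "token": "demo-token",
#       "nickname": "demo",
#       "shard": None,
#       "start_id": "0",
#       "end_id": "1000000",
#       "shard_piece": None,
#       "wat": None,
#       "shards": None,
#   }
#   resumed = load(**worker_state)          # rebuilds an equivalent client
# ---------------------------------------------------------------------------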
| 35.709091 | 118 | 0.614562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.21945 |
0567803d049b2b08966e5134ef97c6b64fdfc130
| 1,921 |
py
|
Python
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 2 |
2019-06-20T23:32:10.000Z
|
2021-01-24T22:32:07.000Z
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 157 |
2019-04-09T18:40:42.000Z
|
2021-05-06T13:44:33.000Z
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 1 |
2019-07-12T22:17:46.000Z
|
2019-07-12T22:17:46.000Z
|
import os
DB_LOCATION = os.getenv("DB_URI", "test.db")
# Debug flag to output more verbose logging
# - defaults to False
DEBUG = os.getenv("DEBUG", False)
# Configurable output directory for saving machine learning model pickles
# - defaults to ../output
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "output")
# Port to make worker service available on
PORT = os.getenv("PORT", "45042")
# Configurable filename for output logs
LOG_FILENAME = os.getenv("LOG_FILENAME", "distil-auto-ml.log")
# User agent to supply to TA3 Systems
SERVER_USER_AGENT = "qntfy_ta2"
# Primitives static file directory
D3MSTATICDIR = os.getenv("D3MSTATICDIR", "/static")
# Enable GPU pipelines - "auto" will try to detect, "true" and "false" will force
GPU = os.getenv("GPU", "auto")
# Batch size to apply to primitives where feasible
REMOTE_SENSING_BATCH_SIZE = int(os.getenv("REMOTE_SENSING_BATCH_SIZE", 128))
# Solution serach progress update message interval in seconds
PROGRESS_INTERVAL = float(os.getenv("PROGRESS_INTERVAL", 10.0))
# maximum number of augment columns to support
AUG_MAX_COLS = int(os.getenv("AUG_MAX_COLS", 50))
# maximum number of augment rows to support
AUG_MAX_ROWS = int(os.getenv("AUG_MAX_ROWS", 50000))
# maximum amount of time for hyperparam tuning in seconds
TIME_LIMIT = int(os.getenv("TIME_LIMIT", 600))
# use untuned/internally tuned pipelines (faster) or external tuning (better results)
HYPERPARAMETER_TUNING = os.getenv("HYPERPARAMETER_TUNING", "True") == "True"
# controls parallelism within primitives - defaults to the number of CPUs
N_JOBS = int(os.getenv("N_JOBS", -1))
# enable use of mlp classifier + gradcam visualization
MLP_CLASSIFIER = os.getenv("MLP_CLASSIFIER", "False") == "True"
# whether or not received features for remote sensing are pooled or not
IS_POOLED = os.getenv("POOL_FEATURES", "True") == "True"
# enable computation of prediction confidences (parsed like the other boolean flags above)
COMPUTE_CONFIDENCES = os.getenv("COMPUTE_CONFIDENCES", "False") == "True"
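# ---------------------------------------------------------------------------
# Illustrative note (not used by the application): with the
# `os.getenv(NAME, default) == "True"` pattern above, the default string and
# the comparison string together decide the flag, so only the exact spelling
# "True" enables it. A quick self-contained check:
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_env = {"MLP_CLASSIFIER": "true"}  # stand-in for os.environ
    # Lower-case "true" does NOT enable the flag under this pattern:
    mlp = demo_env.get("MLP_CLASSIFIER", "False") == "True"
    pooled = demo_env.get("POOL_FEATURES", "True") == "True"  # unset -> default True
    print(f"MLP_CLASSIFIER={mlp}, IS_POOLED={pooled}")  # -> False, True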
| 34.303571 | 85 | 0.753774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,291 | 0.672046 |
0567c00611e59a9c33c0140344f11e8c956bd4aa
| 278 |
py
|
Python
|
python/testData/completion/slots.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2 |
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/slots.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173 |
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/slots.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2 |
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A(object):
__slots__ = ['foo', 'bar']
a = A()
a.ba<caret>
class B(object):
__slots__ = ['bar']
class C(B):
pass
C().ba<caret>
class D(object):
pass
class E(D):
__slots__ = ['bar']
E().ba<caret>
class F:
__slots__ = ['baz']
F().ba<caret>
| 9.586207 | 30 | 0.535971 | 47 | 0.169065 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.089928 |
056887fff4c016e1bd810fe62a7c889a8d65cc5e
| 1,952 |
py
|
Python
|
aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py
|
AlejandroRios/IAANDOCAC-aircraft-framework
|
9768e9736af70e20e8ef1cc0ad6501f3a28dbb47
|
[
"Apache-2.0"
] | null | null | null |
aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py
|
AlejandroRios/IAANDOCAC-aircraft-framework
|
9768e9736af70e20e8ef1cc0ad6501f3a28dbb47
|
[
"Apache-2.0"
] | null | null | null |
aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py
|
AlejandroRios/IAANDOCAC-aircraft-framework
|
9768e9736af70e20e8ef1cc0ad6501f3a28dbb47
|
[
"Apache-2.0"
] | null | null | null |
"""
Function :
Title :
Written by:
Email : [email protected]
Date :
Last edit :
Language : Python 3.8 or >
Aeronautical Institute of Technology - Airbus Brazil
Description:
-
Inputs:
-
Outputs:
-
TODO's:
-
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# FUNCTIONS
# =============================================================================
def state_vector(x, trim_par):
X = np.zeros((12, 1))
X[0] = x[0] # V
X[1] = x[1] # alpha
X[2] = x[2] # q
X[3] = x[3] # theta
X[4] = trim_par['H_m'] # H
X[6] = x[4] # beta
X[7] = x[5] # phi
X[8] = x[6] # p
X[9] = x[7] # r
X[10] = x[8] # r
return X
# =============================================================================
# MAIN
# =============================================================================
# =============================================================================
# TEST
# =============================================================================
# x = [68.0588,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0]
# trim_par = {}
# trim_par = {'V':68.0588,
# 'H_m':10000,
# 'chi_deg':0,
# 'gamma_deg':0,
# 'phi_dot_deg_s':0,
# 'theta_dot_deg':0,
# 'psi_dot_deg_s':0,
# 'beta_deg_eq':0,
# 'W':[0, 0, 0]}
# X = state_vector(x, trim_par)
# print(X)
| 22.436782 | 79 | 0.236168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,623 | 0.831455 |
056958a4ad13bb68213d3b4a27aff485fb258a2f
| 317 |
py
|
Python
|
10-Days-of-Statistics/Python/day-0_weighted_mean.py
|
joaopalmeiro/hackerrank
|
271b87645710e5ed56cbfd8c4209f3a7436e3f72
|
[
"MIT"
] | null | null | null |
10-Days-of-Statistics/Python/day-0_weighted_mean.py
|
joaopalmeiro/hackerrank
|
271b87645710e5ed56cbfd8c4209f3a7436e3f72
|
[
"MIT"
] | null | null | null |
10-Days-of-Statistics/Python/day-0_weighted_mean.py
|
joaopalmeiro/hackerrank
|
271b87645710e5ed56cbfd8c4209f3a7436e3f72
|
[
"MIT"
] | null | null | null |
N = int(input())
X = list(map(int, input().split()))
W = list(map(int, input().split()))
def weighted_mean(X, W):
numerator = sum([a * b for a, b in zip(X, W)])
denominator = sum(W)
weighted_mean_value = numerator / denominator
return round(weighted_mean_value, 1)
print(weighted_mean(X, W))
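# Worked check (illustrative): X = [10, 40, 30], W = [1, 2, 3]
# -> (10*1 + 40*2 + 30*3) / (1 + 2 + 3) = 180 / 6 = 30.0
assert weighted_mean([10, 40, 30], [1, 2, 3]) == 30.0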
| 16.684211 | 50 | 0.630915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0569e6f550e0e8fb6bd11e2714deff2f7f71997f
| 2,274 |
py
|
Python
|
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | 2 |
2018-01-18T09:16:16.000Z
|
2022-02-12T08:59:23.000Z
|
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | null | null | null |
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
COMMON_PATH = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(COMMON_PATH, os.pardir))
DATABASE_URL = "postgresql://he:he@localhost:5432/jobbole"
class CeleryConfig(object):
    BROKER_URL = 'redis://[email protected]:6379/0'  # the broker to use
    CELERY_RESULT_BACKEND = 'redis://[email protected]:6379/1'  # the result backend to use
    CELERY_TIMEZONE = 'Asia/Shanghai'  # timezone; the default is UTC
    CELERY_ENABLE_UTC = True
    CELERY_TASK_SERIALIZER = 'msgpack'  # task (de)serialization format; options: json, yaml, msgpack, pickle (not recommended)
    CELERY_RESULT_SERIALIZER = 'json'  # reading task results is rarely performance-critical, so the more readable JSON is used
    CELERY_TASK_RESULT_EXPIRES = 60 * 60 * 24  # task result expiry; avoid a bare 86400, make such magic numbers self-explanatory
    CELERY_IMPORTS = (  # task modules to import
)
# logging
LoggingConfig = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s",
'datefmt': '%Y/%m/%d %H:%M:%S'
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"info_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "simple",
"filename": Config.PROJECT_ROOT + '/jobbole_info.log',
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
},
"error_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "ERROR",
"formatter": "simple",
"filename": Config.PROJECT_ROOT + '/jobbole_error.log',
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
}
},
"loggers": {
"my_module": {
"level": "ERROR",
"handlers": ["info_file_handler"],
"propagate": False
}
},
"root": {
"level": "INFO",
"handlers": ["console", "info_file_handler", "error_file_handler"]
}
}
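# ---------------------------------------------------------------------------
# Hedged usage sketch: LoggingConfig follows the stdlib dictConfig schema, so
# wiring it up is one call. Note that dictConfig opens the rotating log files
# immediately, so running this writes into PROJECT_ROOT.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import logging
    import logging.config

    logging.config.dictConfig(LoggingConfig)
    # Root logger -> console + jobbole_info.log (ERROR+ also to jobbole_error.log)
    logging.getLogger(__name__).info("logging configured")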
| 30.72973 | 100 | 0.554969 | 940 | 0.383987 | 0 | 0 | 0 | 0 | 0 | 0 | 1,276 | 0.521242 |
056bdc49927b577c2ca6f33c088621f5b1d3d179
| 8,834 |
py
|
Python
|
interface.py
|
singularitai/Morphling
|
e7a3af969123c0d3c0f3c6f1036a97e9be0b289c
|
[
"MIT",
"Condor-1.1",
"Unlicense"
] | 9 |
2021-03-22T09:18:58.000Z
|
2022-03-02T01:42:11.000Z
|
interface.py
|
singularitai/Morphling
|
e7a3af969123c0d3c0f3c6f1036a97e9be0b289c
|
[
"MIT",
"Condor-1.1",
"Unlicense"
] | null | null | null |
interface.py
|
singularitai/Morphling
|
e7a3af969123c0d3c0f3c6f1036a97e9be0b289c
|
[
"MIT",
"Condor-1.1",
"Unlicense"
] | 2 |
2022-03-29T07:59:12.000Z
|
2022-03-31T09:10:47.000Z
|
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Mock.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
import sys
import subprocess
import application_backend as ab
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def __init__(self):
self.app = QtWidgets.QApplication(sys.argv)
self.MainWindow = QtWidgets.QMainWindow()
self.setupUi(self.MainWindow)
self.MainWindow.show()
sys.exit(self.app.exec_())
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(873, 663)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(9, 9, 841, 631))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.SuperResolution = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.SuperResolution.setChecked(True)
self.SuperResolution.setObjectName("SuperResolution")
self.gridLayout.addWidget(self.SuperResolution, 7, 0, 1, 1)
self.SingleSpeaker = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.SingleSpeaker.setObjectName("SingleSpeaker")
self.gridLayout.addWidget(self.SingleSpeaker, 5, 0, 1, 1)
self.CustomFace = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.CustomFace.setObjectName("CustomFace")
self.gridLayout.addWidget(self.CustomFace, 8, 0, 1, 1)
self.AudioOnly = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.AudioOnly.setChecked(False)
self.AudioOnly.setObjectName("AudioOnly")
self.gridLayout.addWidget(self.AudioOnly, 3, 0, 1, 1)
self.Generate = QtWidgets.QPushButton(self.gridLayoutWidget)
self.Generate.setObjectName("Generate")
self.gridLayout.addWidget(self.Generate, 18, 0, 1, 1)
#self.gridLayout.addWidget(self.Generate, 13, 0, 1, 1)
self.SelectCustomFace = QtWidgets.QPushButton(self.gridLayoutWidget)
self.SelectCustomFace.setEnabled(False)
self.SelectCustomFace.setCheckable(False)
self.SelectCustomFace.setChecked(False)
self.SelectCustomFace.setFlat(False)
self.SelectCustomFace.setObjectName("SelectCustomFace")
self.gridLayout.addWidget(self.SelectCustomFace, 9, 0, 1, 1)
self.SmilingFace = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.SmilingFace.setObjectName("SmilingFace")
self.gridLayout.addWidget(self.SmilingFace, 6, 0, 1, 1)
#self.progressBar = QtWidgets.QProgressBar(self.gridLayoutWidget)
#self.progressBar.setProperty("value", 24)
#self.progressBar.setObjectName("progressBar")
#self.gridLayout.addWidget(self.progressBar, 18, 0, 1, 1)
self.tacotron2 = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.tacotron2.setObjectName("tacotron2")
#self.gridLayout.addWidget(self.tacotron2, 18, 0, 1, 1)
self.gridLayout.addWidget(self.tacotron2, 13, 0, 1, 1)
self.LJSpeech = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.LJSpeech.setObjectName("LJSpeech")
self.gridLayout.addWidget(self.LJSpeech, 4, 0, 1, 1)
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.gridLayoutWidget)
self.plainTextEdit.setObjectName("plainTextEdit")
self.gridLayout.addWidget(self.plainTextEdit, 1, 0, 1, 1)
self.VoiceNumber = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.VoiceNumber.setEnabled(False)
self.VoiceNumber.setMaximum(123)
self.VoiceNumber.setObjectName("VoiceNumber")
self.gridLayout.addWidget(self.VoiceNumber, 11, 0, 1, 1)
self.SpecificVoice = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.SpecificVoice.setObjectName("SpecificVoice")
self.gridLayout.addWidget(self.SpecificVoice, 10, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.CustomFace.clicked.connect(self.SelectCustomFace.show)
self.SpecificVoice.clicked.connect(self.VoiceNumber.show)
self.Generate.clicked.connect(self.generate_clicked)
self.SpecificVoice.clicked.connect(self.custom_voice_toggle)
self.CustomFace.clicked.connect(self.custom_face_toggle)
self.SelectCustomFace.clicked.connect(self.custom_face_clicked)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.custom_face_path = ""
def custom_voice_toggle(self):
if self.SpecificVoice.isChecked():
self.VoiceNumber.setEnabled(True)
else:
self.VoiceNumber.setEnabled(False)
def custom_face_toggle(self):
if self.CustomFace.isChecked():
self.SelectCustomFace.setEnabled(True)
self.SelectCustomFace.setCheckable(True)
self.SelectCustomFace.setChecked(True)
self.SelectCustomFace.setFlat(True)
else:
self.SelectCustomFace.setEnabled(False)
self.SelectCustomFace.setCheckable(False)
self.SelectCustomFace.setChecked(False)
self.SelectCustomFace.setFlat(False)
def custom_face_clicked(self):
import easygui
import os
path = easygui.fileopenbox()
cwd = os.getcwd()
if type(path) != type(None):
relative_path = os.path.relpath(path, cwd)
self.custom_face_path = relative_path
print(relative_path)
def generate_clicked(self):
text = self.plainTextEdit.toPlainText().replace("\n", ".")
mode = 0
audio_only = bool(self.AudioOnly.isChecked())
audio_model = 1
if(self.LJSpeech.isChecked()):
audio_model = 0
if(self.tacotron2.isChecked()):
audio_model = 2
single_speaker = self.SingleSpeaker.isChecked()
no_smiling = self.SmilingFace.isChecked()
super_resolution = self.SuperResolution.isChecked()
use_custom_face = self.CustomFace.isChecked()
use_custom_voice = self.SpecificVoice.isChecked()
custom_voice = self.VoiceNumber.value()
command = ""
command += "python application_backend -t {} ".format(text)
command += "-m {} ".format(mode)
command += "-ao {} ".format(audio_only)
command += "-am {} ".format(audio_model)
command += "-ss {} ".format(single_speaker)
command += "-sm {} ".format(no_smiling)
command += "-sr {} ".format(super_resolution)
command += "-ucf {} ".format(use_custom_face)
command += "-cf {} ".format(self.custom_face_path)
command += "-ucv {} ".format(use_custom_voice)
command += "-cv {}".format(custom_voice)
print(command)
ab.Generate(audio_model=audio_model, audio_only=audio_only, custom_face=self.custom_face_path, custom_voice=custom_voice,
full_text=text, mode=mode, single_speaker=single_speaker, smiling=no_smiling, super_resolution=super_resolution,
use_custom_face=use_custom_face, use_custom_voice=use_custom_voice)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "SingularitAI | Morphling Tool"))
self.SuperResolution.setText(_translate("MainWindow", "Apply Super Resolution"))
self.SingleSpeaker.setText(_translate("MainWindow", "Single Speaker and Voice"))
self.CustomFace.setText(_translate("MainWindow", "Use Custom Face"))
self.AudioOnly.setText(_translate("MainWindow", "Generate Audio Only"))
self.Generate.setText(_translate("MainWindow", "Generate"))
self.SelectCustomFace.setText(_translate("MainWindow", "Select Custom Face"))
self.SmilingFace.setText(_translate("MainWindow", "Avoid Smiling Faces"))
self.LJSpeech.setText(_translate("MainWindow", "Use LJ Speech"))
self.tacotron2.setText(_translate("MainWindow", "Use Tacotron Speech"))
self.SpecificVoice.setText(_translate("MainWindow", "Use Specific Voice"))
if __name__ == "__main__":
Ui_MainWindow()
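# ---------------------------------------------------------------------------
# Hedged note: generate_clicked() assembles `command` only for logging and
# calls ab.Generate() in-process; the unused `import subprocess` above
# suggests a CLI route was considered. If that route were wanted, building an
# explicit argument vector avoids shell-quoting problems with free text
# (illustrative; assumes application_backend exposes a matching CLI entry
# point):
#
#   argv = [sys.executable, "-m", "application_backend",
#           "-t", text, "-m", str(mode), "-ao", str(audio_only),
#           "-am", str(audio_model)]  # ...and so on for the remaining flags
#   subprocess.run(argv, check=True)
# ---------------------------------------------------------------------------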
| 43.732673 | 130 | 0.677043 | 8,433 | 0.954607 | 0 | 0 | 0 | 0 | 0 | 0 | 1,264 | 0.143084 |
056ef751fabceeae1db74a620559c093e5b86dfa
| 10,935 |
py
|
Python
|
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | null | null | null |
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | 1 |
2022-01-26T08:58:34.000Z
|
2022-01-26T08:58:34.000Z
|
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | null | null | null |
from locust import HttpUser, task, between
import re
import random
import json
import os
class OdooUser:
if os.environ.get('HOST'):
host = os.environ.get('HOST')
wait_time = between(20, 40)
def on_start(self):
response = self.client.get("/web/login")
assert response.status_code == 200
csrf_token = re.search(
r'input type="hidden" name="csrf_token" value="(.+)"', response.text).group(1)
response = self.client.post(
"/web/login", data={
"csrf_token": csrf_token,
"login": os.environ.get('ODOO_USER_NAME'),
"password": os.environ.get('ODOO_USER_PASSWORD'),
"redirect": ""
})
assert response.status_code == 200
response = self.client.get("/web")
assert response.status_code == 200
session_info = re.search(
            r'odoo.session_info\s*=\s*(.+);', response.text).group(1)
session_info = json.loads(session_info)
self.thecontext = {
"uid": session_info['uid'],
"company_id": session_info['company_id'],
"allowed_company_ids": [session_info['company_id']],
"lang": session_info['user_context']['lang'],
"tz": session_info['user_context']['tz']
}
response = self.client.get(
            f'/web/webclient/load_menus/{session_info["cache_hashes"]["load_menus"]}')
assert response.status_code == 200
response = json.loads(response.content)
crm_menu = next(
filter(lambda item: item['name'] == 'CRM', response['children']))
self.crm_action_id = int(crm_menu['action'].split(',')[1])
self.call_jsonrpc(
"/web/dataset/call_kw/res.users/systray_get_activities",
model="res.users",
method="systray_get_activities",
kwargs={"context": self.thecontext},
args=[]
)
response = self.client.get(
"/web/image?model=res.users", params={'field': 'image_128', 'id': self.thecontext['uid']})
assert response.status_code == 200
response = self.call_action(
"/web/action/run", action_id=self.crm_action_id)
result = json.loads(response.content)['result']
self.thecontext.update(result['context'])
def call_jsonrpc(self, url, **params):
response = self.client.post(
url,
json={
"id": random.randrange(10000000000),
"params": {**params},
"jsonrpc": "2.0", "method": "call"
}
)
assert response.status_code == 200
response = json.loads(response.content)
assert 'error' not in response
return response['result']
def call_action(self, url, action_id):
response = self.client.post(
url,
json={
"id": random.randrange(10000000000),
"params": {
"action_id": action_id,
},
"jsonrpc": "2.0", "method": "call"
}
)
assert response.status_code == 200
assert 'error' not in json.loads(response.content)
return response
class OdooUserCrmKanban(OdooUser, HttpUser):
@task
def crm_kanban(self):
self.call_action("/web/action/run", action_id=self.crm_action_id)
domain = [
"&",
["type", "=", "opportunity"],
["user_id", "=", self.thecontext['uid']]
]
self.call_jsonrpc(
"/web/dataset/call_kw/crm.lead/read_progress_bar",
model="crm.lead", method="read_progress_bar",
kwargs={
"domain": domain,
"group_by": "stage_id",
"progress_bar": {
"field": "activity_state",
"colors": {
"planned": "success",
"today": "warning",
"overdue": "danger"
},
"sum_field": "expected_revenue",
"modifiers": {}
}
},
args=[]
)
result = self.call_jsonrpc(
"/web/dataset/call_kw/crm.lead/web_read_group",
model="crm.lead", method="web_read_group",
kwargs={
"domain": domain,
"fields": [
"stage_id",
"color",
"priority",
"expected_revenue",
"kanban_state",
"activity_date_deadline",
"user_email",
"user_id",
"partner_id",
"activity_summary",
"active",
"company_currency",
"activity_state",
"activity_ids",
"name",
"tag_ids",
"activity_exception_decoration",
"activity_exception_icon"
],
"groupby": ["stage_id"],
"orderby": "",
"lazy": True
},
args=[]
)
for group in result['groups']:
result = self.call_jsonrpc(
"/web/dataset/search_read",
model="crm.lead",
domain=[
"&", ["stage_id", "=", group['stage_id'][0]],
"&", ["type", "=", "opportunity"],
["user_id", "=", self.thecontext['uid']]
],
fields=[
"stage_id",
"color",
"priority",
"expected_revenue",
"kanban_state",
"activity_date_deadline",
"user_email",
"user_id",
"partner_id",
"activity_summary",
"active",
"company_currency",
"activity_state",
"activity_ids",
"name",
"tag_ids",
"activity_exception_decoration",
"activity_exception_icon"
],
limit=80,
sort="",
context={
"bin_size": True
}
)
# TODO: /web/dataset/call_kw/crm.tag/read
# TODO: /web/dataset/call_kw/crm.stage/read
class OdooUserCrmLeadCreate(OdooUser, HttpUser):
@task
def crm_lead_create(self):
partners = self.call_jsonrpc(
"/web/dataset/call_kw/res.partner/name_search",
model="res.partner", method="name_search",
kwargs={
"name": "",
"args": ["|", ["company_id", "=", False], ["company_id", "=", 1]],
"operator": "ilike",
"limit": 8
},
args=[]
)
random_partner_id = random.choice(partners)[0]
result = self.call_jsonrpc(
"/web/dataset/call_kw/crm.lead/onchange",
model="crm.lead", method="onchange",
kwargs={},
args=[
[],
{
"partner_id": random_partner_id,
"company_id": self.thecontext['company_id'],
"user_id": self.thecontext['uid'],
"team_id": self.thecontext['default_team_id'],
"name": False,
"email_from": False,
"phone": False,
"expected_revenue": 0,
"priority": "0",
"company_currency": 1,
"type": "opportunity",
"partner_name": False,
"contact_name": False,
"country_id": False,
"state_id": False,
"city": False,
"street": False,
"street2": False,
"zip": False,
"mobile": False,
"website": False,
"function": False,
"title": False
},
"partner_id",
{
"partner_id": "1",
"name": "",
"email_from": "",
"phone": "1",
"expected_revenue": "",
"priority": "",
"company_currency": "",
"company_id": "1",
"user_id": "1",
"team_id": "",
"type": "1",
"partner_name": "",
"contact_name": "",
"country_id": "1",
"state_id": "",
"city": "",
"street": "",
"street2": "",
"zip": "1",
"mobile": "1",
"website": "",
"function": "",
"title": ""
}
]
)
partner = result['value']
partner['id'] = random_partner_id
result = self.call_jsonrpc(
"/web/dataset/call_kw/crm.lead/create",
model="crm.lead", method="create",
kwargs={},
args=[{
"type": "opportunity",
"expected_revenue": random.randrange(1000, 1000000, 1000),
"company_id": self.thecontext['company_id'],
"user_id": self.thecontext['uid'],
"team_id": self.thecontext['default_team_id'],
"priority": "0",
"partner_id": partner['id'],
"name": partner.get('name', False),
"email_from": partner.get('email_from', False),
"phone": partner.get('phone', False),
"partner_name": partner.get('partner_name', False),
"contact_name": partner.get('contact_name', False),
"country_id": partner['country_id'][0],
"state_id": partner['state_id'][0],
"city": partner.get('city', False),
"street": partner.get('street', False),
"street2": partner.get('street2', False),
"zip": partner.get('zip', False),
"function": partner.get('function', False),
"title": partner.get('title', False)
}]
)
if result % 100 == 0:
print('CRM lead id created:', result)
if __name__ == "__main__":
from locust.env import Environment
my_env = Environment(user_classes=[OdooUserCrmKanban])
OdooUserCrmKanban(my_env).run()
| 34.714286 | 102 | 0.438317 | 10,674 | 0.976132 | 0 | 0 | 7,401 | 0.676818 | 0 | 0 | 3,131 | 0.286328 |
05702fee1b4a5bd092fcebf23643ddbeb574cdf2
| 939 |
py
|
Python
|
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | 1 |
2018-02-22T15:38:01.000Z
|
2018-02-22T15:38:01.000Z
|
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | null | null | null |
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 18:03:27 2017
@author: paul
"""
from SimulatorTLKT import Boat
from SimulatorTLKT import FIT_VELOCITY
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from math import pi
matplotlib.rcParams.update({'font.size': 22})
pOfS=np.arange(0,360,0.5)
wMags=np.arange(0,25,2)
polars=[]
legends=[]
fig=plt.figure()
for mag in wMags:
pol=[]
legends.append('Wind mag = '+str(mag) + ' m/s')
for p in pOfS :
pol.append(Boat.getDeterDyn(p,mag,FIT_VELOCITY))
polars.append(list(pol))
ax=plt.polar(pOfS*pi/180,pol,label=str(mag) + ' m/s')
#plt.legend(legends)
plt.legend(bbox_to_anchor=(1.1,1), loc=2, borderaxespad=0.)
#plt.xlabel('Polar plot of Boat velocity [m/s] wrt. point of sail [deg]',fontsize=22)
#ax.xaxis.set_label_position('top')
fig.savefig('../../../Article/Figures/polar_modified2.pdf', bbox_inches='tight')
| 25.378378 | 85 | 0.690096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.353568 |
05705dae303e8a7ae7b9765283158fc78c1a5987
| 3,387 |
py
|
Python
|
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | 2 |
2021-09-10T12:52:43.000Z
|
2021-09-10T15:38:29.000Z
|
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | null | null | null |
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pformat
ds_obj_mcx_note = ('The MCX data returned from \'dscl\' is a string nested in the attribute queried.\n'
'Settings can be filtered by using key filters.\n'
'Multiple values can be filtered for specific domains by comma seperating the values\n'
'Filter syntax examples:\n'
' - \'com.apple.MCX=\' will keep the preference domain \'com.apple.MCX\'.\n'
' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin\' will keep the preference\n'
' domain value from the \'com.apple.MCX\' preference domain _specifically_.\n'
' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin,com.apple.cachedaccounts.WarnOnCreate\'\n'
' will keep the two values for the \'com.apple.MCX\' preference domain.\n'
'Please note that filtering values is only done if the preference domain is also specified\n\n'
'In the example dictionary below:\n'
' - \'com.apple.MCX\' is referred to as the \'preference domain\'.\n'
' - \'com.apple.cachedaccounts.CreateAtLogin\' is referred to as the \'preference domain value\'.\n'
' This domain value should be taken from the \'mcx_preference_settings\' dictionary.\n\n')
ds_obj_mcx_dict_example = {'com.apple.MCX': {'Forced': [{'mcx_preference_settings': {'com.apple.cachedaccounts.CreateAtLogin': True,
'com.apple.cachedaccounts.CreatePHDAtLogin': False,
'com.apple.cachedaccounts.WarnOnCreate': False}}]},
'com.apple.dock': {'Forced': [{'mcx_preference_settings': {'AppItems-Raw': [],
'DocItems-Raw': [],
'contents-immutable': False,
'static-only': False},
'mcx_union_policy_keys': [{'mcx_input_key_names': ['AppItems-Raw'],
'mcx_output_key_name': 'static-apps',
'mcx_remove_duplicates': True},
{'mcx_input_key_names': ['DocItems-Raw'],
'mcx_output_key_name': 'static-others',
'mcx_remove_duplicates': True},
{'mcx_input_key_names': ['MCXDockSpecialFolders-Raw'],
'mcx_output_key_name': 'MCXDockSpecialFolders',
'mcx_remove_duplicates': True}]}]}}
ds_obj_mcx = f'{ds_obj_mcx_note}{pformat(ds_obj_mcx_dict_example)}'
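# Usage sketch: a minimal interpreter for the 'domain=value1,value2' filter
# syntax described in the note above. The helper name `apply_mcx_filter` and
# its return shape are illustrative assumptions, not part of any MCX API.
def apply_mcx_filter(mcx, filter_spec):
    domain, _, values = filter_spec.partition('=')
    if domain not in mcx:
        return {}
    if not values:  # e.g. 'com.apple.MCX=' keeps the whole preference domain
        return {domain: mcx[domain]}
    wanted = set(values.split(','))  # comma-separated preference domain values
    kept = {}
    for policy in mcx[domain].get('Forced', []):
        settings = policy.get('mcx_preference_settings', {})
        kept.update({k: v for k, v in settings.items() if k in wanted})
    return {domain: kept}
# e.g. apply_mcx_filter(ds_obj_mcx_dict_example,
#                       'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin')
# -> {'com.apple.MCX': {'com.apple.cachedaccounts.CreateAtLogin': True}}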
| 91.540541 | 138 | 0.437851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,723 | 0.50871 |
057149c969c7c699e7d3de460f67852d23e83cd2
| 2,622 |
py
|
Python
|
monitors/dns-monitor.py
|
CompeteNZ/Minotaur
|
47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803
|
[
"MIT"
] | null | null | null |
monitors/dns-monitor.py
|
CompeteNZ/Minotaur
|
47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803
|
[
"MIT"
] | null | null | null |
monitors/dns-monitor.py
|
CompeteNZ/Minotaur
|
47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# DESCRIPTION
# Run dns check and store the results in the db
# monitor_source = host address
# DEPENDENCIES
# Install python
# Install mysql.connector "python -m pip install mysql-connector-python"
# Install dotenv "python -m pip install python-dotenv"
# Install nslookup "python -m pip install nslookup"
# HOW TO RUN
# run cmd "python <script>"
# automate on windows using a bat file with command "python <script>" see batch folder for batch files
# automate on linux using cron with command "python <script>"
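# example crontab entry (illustrative path, adjust to your install),
# running the check every 5 minutes:
#   */5 * * * * /usr/bin/python3 /path/to/monitors/dns-monitor.py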
# TODO
import os
import sys
import datetime
import mysql.connector
from nslookup import Nslookup
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
try:
conn = mysql.connector.connect(
user=os.getenv("DB_USERNAME"),
password=os.getenv("DB_PASSWORD"),
host=os.getenv("DB_HOST"),
port=int(os.getenv("DB_PORT")),
database=os.getenv("DB_DATABASE")
)
except mysql.connector.Error as err:
print(err)
sys.exit(1)
# get db connection cursor
cursor = conn.cursor()
# get list of dns monitors from the db
try:
sql = "SELECT monitor_id,monitor_type,monitor_source FROM monitors WHERE monitor_type=%s AND monitor_state=%s"
val = ('dns', 1)
cursor.execute(sql, val)
except mysql.connector.Error as err:
print(err)
sys.exit(1)
results = cursor.fetchall()
dns_query = Nslookup()
for (monitor_id, monitor_type, monitor_source) in results:
ips_record = dns_query.dns_lookup(monitor_source)
#print(ips_record.response_full, ips_record.answer)
if not ips_record.answer:
# host unknown (e.g. domain name lookup error)
# store result in the db as -1
try:
sql = "INSERT INTO monitor_results (monitor_id, monitor_type, monitor_source, monitor_result) VALUES (%s, %s, %s, %s)"
val = (monitor_id, monitor_type, monitor_source, -1)
cursor.execute(sql, val)
except mysql.connector.Error as err:
print(err)
continue
else:
# UPDATE - NOW NOT SAVING OK RESULTS ONLY ERRORS (saves on database etc)
# host found (e.g. resolved IP address)
# store result in the db
#try:
# sql = "INSERT INTO monitor_results (monitor_id, monitor_type, monitor_source, monitor_result) VALUES (%s, %s, %s, %s)"
# val = (monitor_id, monitor_type, monitor_source, 1)
# cursor.execute(sql, val)
#except mysql.connector.Error as err:
# print(err)
continue
# commit db transaction and close connection
conn.commit()
conn.close()
| 30.137931 | 131 | 0.676583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,445 | 0.551106 |
0571570e4ea6cc0ac98e3e348473a3292c2d2151
| 797 |
py
|
Python
|
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
import torch
import cv2
# data dir with training and validation pictures
data_dir = '/home/pawel/Pulpit/picture_data'
#source video stream
camera_source = '/dev/video2'
# flag, False, not used
save = False
#input picture size (px)
input_size = (224,224)
size_pict = input_size[0]
# number of samples per training batch
batch_size = 8
# number of worker processes for data loading
num_workers = 4
# number of training epochs
epoch_num = 2
# old variable, no longer used
frame_iterator = 0
# flag, not used
flag_start = False
#use device in project - cpu or gpu(cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# video stream used by the project
video_stream = cv2.VideoCapture(camera_source)
if not video_stream.isOpened():
raise ValueError("Unable to open video source", camera_source)
| 24.151515 | 71 | 0.756587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.503137 |
0572b494de8de54123140e45c9c69a2ed0fbad3b
| 501 |
py
|
Python
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 67 |
2021-12-02T05:53:44.000Z
|
2022-03-31T07:21:26.000Z
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 13 |
2021-12-05T14:23:46.000Z
|
2022-03-25T21:07:20.000Z
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 16 |
2022-01-11T11:48:24.000Z
|
2022-03-27T19:20:58.000Z
|
from .classifier import SpatialClassifier
def get_field(config, num_classes, num_indicators, in_channels):
if config.name == 'classifier':
return SpatialClassifier(
num_classes = num_classes,
num_indicators = num_indicators,
in_channels = in_channels,
num_filters = config.num_filters,
k = config.knn,
cutoff = config.cutoff,
)
else:
raise NotImplementedError('Unknown field: %s' % config.name)
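# Usage sketch: `config` only needs the attributes read above; a minimal
# stand-in could look like this (the numbers are illustrative assumptions,
# not project defaults):
# from types import SimpleNamespace
# cfg = SimpleNamespace(name='classifier', num_filters=128, knn=32, cutoff=10.0)
# field = get_field(cfg, num_classes=10, num_indicators=2, in_channels=256)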
| 31.3125 | 68 | 0.628743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.061876 |
0572d30a3c1b204b7741919022f74dedf09c6c6c
| 1,693 |
py
|
Python
|
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import requests
import pandas as pd
def cleaner_data(data):
columns = ['ESTACAO', 'LATITUDE', 'LONGITUDE', 'ALTITUDE', 'ANO', 'MES', 'DIA', 'HORA', 'TEMP', 'TMAX', 'TMIN', 'UR', 'URMAX', 'URMIN',
'TD', 'TDMAX', 'TDMIN', 'PRESSAONNM', 'PRESSAONNM_MAX', 'PRESSAONNM_MIN', 'VELVENTO', 'DIR_VENTO', 'RAJADA', 'RADIACAO', 'PRECIPITACAO']
df = pd.DataFrame(columns=columns)
for i in range(1, len(data)):
try:
dado = [data[i].split(' ')]
dado = pd.DataFrame(dado, columns=columns)
# print(dado)
df = df.append(dado)
        except Exception:
            # skip rows that do not split into the expected columns
            pass
str_float = ['LATITUDE', 'LONGITUDE', 'ALTITUDE',
'TEMP', 'TMAX', 'TMIN', 'UR', 'URMAX', 'URMIN',
'TD', 'TDMAX', 'TDMIN',
'PRESSAONNM', 'PRESSAONNM_MAX',
'PRESSAONNM_MIN', 'VELVENTO', 'DIR_VENTO',
'RAJADA', 'RADIACAO', 'PRECIPITACAO']
str_int = ['ANO', 'MES', 'DIA', 'HORA']
df[str_float] = df[str_float].astype('float')
df[str_int] = df[str_int].astype('int64')
    print(df.head())
def get_data():
date_now = datetime.utcnow()
date_delta = date_now - timedelta(days=1)
date_str = date_delta.strftime("%Y%m%d")
for hour in range(0, 24):
print(hour)
url = ("http://master.iag.usp.br/fig_dados/OBSERVACAO/INMET/UND_inmet_" +
str(date_str)+str(hour).zfill(2)+"00.txt")
# print(url)
response = requests.request("GET", url)
data = response.text.split('\n')
print(len(data))
cleaner_data(data)
return data
cleaner_data(get_data())
| 32.557692 | 151 | 0.559362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.322504 |
05757df9e7e0717b064bec504f59f7b4d4c67024
| 7,795 |
py
|
Python
|
get_both_current_and_active_power.py
|
wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition
|
d8a01915f46457257bda7c699fe36e7bdf4f907d
|
[
"MIT"
] | null | null | null |
get_both_current_and_active_power.py
|
wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition
|
d8a01915f46457257bda7c699fe36e7bdf4f907d
|
[
"MIT"
] | null | null | null |
get_both_current_and_active_power.py
|
wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition
|
d8a01915f46457257bda7c699fe36e7bdf4f907d
|
[
"MIT"
] | null | null | null |
#%%
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import os
import time
%matplotlib inline
#%%
import pymysql
def Select(tableName, start, end) :
    sql_query = """ SELECT DataSavedTime, Item005
                    FROM """ + tableName + """
                    WHERE DataSavedTime between '""" + start + """' and '""" + end + """'
                    ORDER BY DataSavedTime ASC """
# original : """ WHERE DataSavedTime between '2019-07-05 08:48:00.000' and '2019-07-05 09:47:59.900' ORDER BY DataSavedTime ASC """
conn = pymysql.connect(host='192.168.100.120', user='root',
password='1234', database='UYeG_Cloud_New', charset='utf8')
curs = conn.cursor()
curs.execute(sql_query)
rows = curs.fetchall()
list_for_data = list(rows)
df_Curr = pd.DataFrame(list_for_data).fillna(0)
df_Curr.columns=['DataSavedTime', 'Item005']
return df_Curr
#%%
def split_time(str_time):
split_colon = str_time.split(':') # [hh, mm, ss.ms]
split_dot = split_colon[2].split('.') # [ss, ms]
time_list = []
for i in range(len(split_colon)-1):
time_list.append(split_colon[i])
time_list.append(split_dot[0])
if len(split_dot)>1:
time_list.append(str(int(int(split_dot[1])/100000)))
else:
time_list.append('0')
return time_list
def check_second(time_list, mode):
temp0, temp1, temp2 = int(time_list[0]), int(time_list[1]), int(time_list[2])
ms = time_list[3]
if mode=='-1':
temp2 = int(time_list[2])-1
if temp2<0:
temp1 = int(time_list[1]) - 1
if temp1<0:
temp0 -= 1
temp1 += 59
temp2 += 59
if mode=='+1':
temp2 = int(time_list[2])+1
if temp2>59:
temp1 += 1
if temp1>59:
temp0 += 1
temp1 -= 59
temp2 -=59
time_list = [str(temp0), str(temp1), str(temp2), ms]
return time_list
def bar_time(time_set): # make time format as hh_mm_ss_ms
split_colon = time_set.split(':')
hh = split_colon[0]
mm = split_colon[1]
split_dot = split_colon[2].split('.')
if len(split_dot)==1:
ss = split_dot[0]
ms = str(0)
else:
ss = split_dot[0]
ms = str(int(int(split_dot[1])/100000))
return hh+'_'+mm+'_'+ss+'_'+ms
def colon_time(time_elements): # make time format as hh:mm:ss.ms
'''
PARAMETER => ['hh', 'mm', 'ss', 'ms']
'''
if time_elements[3]=='0':
return time_elements[0]+':'+time_elements[1]+':'+time_elements[2]
else:
return time_elements[0]+':'+time_elements[1]+':'+time_elements[2]+'.'+time_elements[3]
#%%
# get current
check_time = time.time()
start = '2019-08-09 12:03:00.000'
end = '2019-08-09 12:03:50.000'
df_cur = Select('HisItemCurr', start, end) # 600 data per minute
plt.plot(df_cur['DataSavedTime'], df_cur['Item005'])
plt.ylim(-10, 100)
plt.show()
#cur_result.to_csv('C:/Users/haeng/Desktop/test'+'.csv')
print('time duration = ', time.time()-check_time)
#%%
# get valid current values
check_time = time.time()
cur_date, cur_values = [], []
i = 1
temp_cnt = []
while i in range(1, len(df_cur['Item005'])):
if df_cur['Item005'][i-1]==0 and df_cur['Item005'][i]!=0:
cnt_zero = 0
temp_date = []
temp_values = []
temp_date.append(df_cur['DataSavedTime'][i-1])
temp_values.append(df_cur['Item005'][i-1])
j = i
while j in range(i, len(df_cur['Item005'])):
            if df_cur['Item005'][j]!=0 and j+1<=len(df_cur['Item005'])-1:
if df_cur['Item005'][j+1]==0:
cnt_zero += 1
else:
cnt_zero = 0
            elif df_cur['Item005'][j]==0 and j+1<=len(df_cur['Item005'])-1:
if df_cur['Item005'][j+1]!=0:
cnt_zero = 0
else:
cnt_zero += 1
if cnt_zero>41:
temp_cnt.append(cnt_zero)
cnt_zero = 0
break
temp_date.append(df_cur['DataSavedTime'][j])
temp_values.append(df_cur['Item005'][j])
j += 1
temp_date.append(df_cur['DataSavedTime'][j])
temp_values.append(df_cur['Item005'][j])
i = j
cur_date.append(temp_date)
cur_values.append(temp_values)
i += 1
for i in range(len(cur_date)):
del cur_date[i][len(cur_date[i])-40:]
del cur_values[i][len(cur_values[i])-40:]
print('time duration: ', time.time()-check_time)
#%%
# split current date
start_date, start_time, end_time = [], [], [] # hh:mm:ss.ms
start_time_bar, end_time_bar = [], [] # hh_mm_ss_ms
for i in range(len(cur_date)):
start_date.append(str(cur_date[i][0]).split()[0])
start_t = str(cur_date[i][0]).split()[1]
start_time.append(start_t)
start_time_bar.append(bar_time(start_t))
end_t = str(cur_date[i][len(cur_date[i])-1]).split()[1]
end_time.append(end_t)
end_time_bar.append(bar_time(end_t))
print(start_date)
print(start_time)
print(start_time_bar)
#%%
# set file name to save csv and png
file_names = []
for i in range(len(cur_date)):
file_name = start_date[i]+'_'+start_time_bar[i]
file_names.append(file_name)
print(file_names)
#%%
# save current csv and png
for i in range(len(cur_date)):
cur_start = start_date[i]+' '+start_time[i][:12]
cur_end = start_date[i]+' '+end_time[i][:12]
df_cur_save = Select('HisItemCurr', cur_start, cur_end)
df_cur_save.to_csv('./elevator_label/'+file_names[i]+'.csv')
plt.figure()
plt.plot(df_cur_save['DataSavedTime'], df_cur_save['Item005'])
plt.ylim(-10, 100)
plt.savefig('./elevator_label/'+file_names[i]+'.png')
plt.close()
#%%
# get active power by using time of current
# start_, end_ --> xx:xx:xx.xxx
df_act_dict = {}
for i in range(len(cur_date)):
# change start second by substracting 1
start_new = check_second([start_time_bar[i].split('_')[0], start_time_bar[i].split('_')[1],
start_time_bar[i].split('_')[2], start_time_bar[i].split('_')[3]], '-1')
s_temp = start_date[i]+' '+colon_time(start_new)
# change end second by adding 1
end_new = check_second([end_time_bar[i].split('_')[0], end_time_bar[i].split('_')[1],
end_time_bar[i].split('_')[2], end_time_bar[i].split('_')[3]], '+1')
e_temp = start_date[i]+' '+colon_time(end_new)
check_time = time.time()
df_act = Select('HisItemAct', s_temp, e_temp) # I don't know why this loop takes a long time in this part
df_act_dict[i] = df_act
plt.figure()
plt.plot(df_act['DataSavedTime'], df_act['Item005'])
plt.ylim(-10, 100)
plt.show()
print('time duration(plot) = ', time.time()-check_time)
#%%
# get real active power time
act_start_time, act_end_time = [], []
act_start_idx, act_end_idx = [], []
for z in range(len(cur_date)):
#print(df_act_dict[z].shape) # 261, 111
#df_act_dict[z].to_csv('./elevator_label/active_raw_test'+str(z)+'.csv')
for i in range(1, df_act_dict[z].shape[0]):
if df_act_dict[z]['Item005'][i-1]==0 and df_act_dict[z]['Item005'][i]!=0:
act_start_time.append(str(df_act_dict[z]['DataSavedTime'][i-1]).split()[1])
act_start_idx.append(i-1)
break
for i in range(df_act_dict[z].shape[0]-2, int(df_act_dict[z].shape[0]/2), -1):
if df_act_dict[z]['Item005'][i]!=0 and df_act_dict[z]['Item005'][i+1]==0:
act_end_time.append(str(df_act_dict[z]['DataSavedTime'][i+1]).split()[1])
act_end_idx.append(i+1)
break
print(act_start_idx)
print(act_start_time)
print(act_end_idx)
print(act_end_time)
#%%
# save active power csv and png
for i in range(len(cur_date)):
df_act_save = df_act_dict[i][act_start_idx[i]:act_end_idx[i]+1]
df_act_save.to_csv('./elevator_label/'+file_names[i]+'_active.csv')
plt.figure()
plt.plot(df_act_save['DataSavedTime'], df_act_save['Item005'])
plt.ylim(-10, 100)
plt.savefig('./elevator_label/'+file_names[i]+'_active.png')
plt.close()
#%%
| 29.194757 | 134 | 0.630276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,823 | 0.233868 |
057648a66341634f2bd91398e33248914e65d08f
| 435 |
py
|
Python
|
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | 5 |
2020-07-20T11:05:07.000Z
|
2022-03-11T15:51:52.000Z
|
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
from pyconcepticon import Concepticon
from pynorare.dataset import get_dataset_cls
def add_datasets(parser):
parser.add_argument(
'dataset',
nargs='+',
help='select your dataset',
type=str)
def iter_datasets(args):
for dsid in args.dataset:
cls = get_dataset_cls(args.api.datasets[dsid].path.parent)
yield cls(repos=args.norarepo, concepticon=Concepticon(args.repos.repos))
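# Usage sketch: typical wiring inside a CLI subcommand, assuming an
# argparse-style `args` namespace exposing the attributes read above
# (dataset, api, norarepo, repos):
# def run(args):
#     for dataset in iter_datasets(args):
#         dataset.download()  # hypothetical method on the dataset class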
| 24.166667 | 81 | 0.691954 | 0 | 0 | 203 | 0.466667 | 0 | 0 | 0 | 0 | 33 | 0.075862 |
0576551dec71ed65de6452c0a4914168209bd3e8
| 2,987 |
py
|
Python
|
braille/lang.py
|
galou/braille-converter
|
bf3b898c212a5067d61ce7dc6828df227ddd9db5
|
[
"MIT"
] | 24 |
2015-04-03T10:24:18.000Z
|
2022-01-29T10:50:34.000Z
|
braille/lang.py
|
galou/braille-converter
|
bf3b898c212a5067d61ce7dc6828df227ddd9db5
|
[
"MIT"
] | 2 |
2016-03-28T04:10:14.000Z
|
2017-02-22T23:25:12.000Z
|
braille/lang.py
|
galou/braille-converter
|
bf3b898c212a5067d61ce7dc6828df227ddd9db5
|
[
"MIT"
] | 10 |
2015-05-06T06:26:21.000Z
|
2019-11-13T23:11:11.000Z
|
# Copyright 2012 Jonathan Paugh
# See COPYING for license details
'''
Functions that deal with lang files or rulesets
'''
import ds
import comp as cpl
from .options import opt
from .util import fwarn, do_re, gettype
import functools
import os
langdir = os.path.join(os.path.dirname(__file__), 'lang')
if not os.path.isdir(langdir):
raise IOError('Cannot load lang files; unknown dir "%s"' % langdir)
#Cache of imported rulesets, indexed by lang name
ruleset = { }
def import_ruleset(lang='amer-2', comp=None, fresh=False):
'''
loads the rules for the given language
params:
-------
lang='amer-2' Language to load. Defaults to American Grade 2.
This consists of solely of alphanumeric characters and hyphens.
comp=True - Compile the ruleset to the most succint form (brl).
The default is set by commandline-argument.
fresh=False - Get a fresh version of the ruleset, from file, rather
than relying on the cache. Defaults False.
If you change the comp option (or change the lang file), you must
set this to True to see your changes.
'''
#Don't be grumpy about underscores.
lang = lang.replace('_', '-')
rules = []
#prefer cached version first
if not fresh and lang in ruleset:
return ruleset[lang]
#Set default comp
  if comp is None:
comp = opt('comp')
#Import standard (international) rules first
if (not lang == 'standard' and
not 'standard' in ruleset):
import_ruleset('standard')
cxt = ds.Context()
cxt.fname = os.path.join(langdir, lang)
cxt.lineno = 0
  with open(cxt.fname) as lfile:
    for line in lfile:
      cxt.lineno += 1
      rule = __parse_rule(cxt, line, comp)
      if rule:
        rules.append(rule)
  rules.sort(key=functools.cmp_to_key(__cmp_rules))
# cache ruleset for this language
ruleset[lang] = rules
if not lang == 'standard':
rules.extend(ruleset['standard'])
return rules
def __parse_rule(cxt, line, comp=False):
'''
parse a string into a line tuple.
'''
line = line.strip()
if (not line) or line[0] == '#':
return None
rule = do_re(ds.patt.rule, line)
if not rule:
fwarn(cxt, 'Invalid Rule "%s"' % line)
return None
typ = rule['type'].lower()
rule['type'] = typ
if not typ in ds.types:
fwarn(cxt, 'Unknown rule type: '+typ)
return None
if not rule['priority']:
rule['priority'] = 1
#Compile the rule. (Convert it's brl to minimum form)
fun = gettype(rule, 'comp')
if comp or fun == cpl.dotify or fun == cpl.prefix:
fun(cxt, rule)
else:
#The minimum we must do is dotify any dots
cpl.try_dotify(cxt, rule)
return rule
def __cmp_rules(x, y):
'''
cmp function for the rules.
'''
if gettype(x, 'order') < gettype(y, 'order'):
return -1
elif gettype(x, 'order') > gettype(y, 'order'):
return 1
elif x['priority'] < y['priority']:
return -1
elif x['priority'] > y['priority']:
return 1
  else:
    # Longer strings first
    lx, ly = len(x['prn']), len(y['prn'])
    return (lx < ly) - (lx > ly)
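# Usage sketch (illustrative, assuming the packaged lang files are present):
if __name__ == '__main__':
  loaded = import_ruleset()  # American Grade 2 plus the standard ruleset
  for r in loaded[:5]:
    print(r['type'], r['priority'])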
| 23.706349 | 69 | 0.657181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,349 | 0.451624 |
057756ea7512bea24b4425c570ad661d5b1d078c
| 118 |
py
|
Python
|
Codeforces/B_Simple_Game.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/B_Simple_Game.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/B_Simple_Game.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
# Pick the number adjacent to m on the side of [1, n] holding more values;
# n == 1 forces the answer 1.
n, m = map(int, input().split())
mid = n // 2
if n == 1 and m == 1:
    print(1)
elif mid < m:
    print(m - 1)
else:
    print(m + 1)
| 14.75 | 28 | 0.542373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.025424 |
057a549b59e9c893c4abd50247ba001cdab7fac2
| 966 |
py
|
Python
|
toughradius/tests/test_base.py
|
geosson/GSRadius
|
5870e3d055e8366f98b8e65220a1520b5da22f6d
|
[
"Apache-2.0"
] | 1 |
2019-05-12T15:06:58.000Z
|
2019-05-12T15:06:58.000Z
|
toughradius/tests/test_base.py
|
geosson/GSRadius
|
5870e3d055e8366f98b8e65220a1520b5da22f6d
|
[
"Apache-2.0"
] | null | null | null |
toughradius/tests/test_base.py
|
geosson/GSRadius
|
5870e3d055e8366f98b8e65220a1520b5da22f6d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
from toughlib import config as iconfig
import os
import requests
class TestMixin:
MANAGE_URL = 'http://127.0.0.1:18160'
def sub_path(self,path):
return "%s%s"%(TestMixin.MANAGE_URL,path)
def init_rundir(self):
try:
os.mkdir("/tmp/toughradius")
        except OSError:
            print("/tmp/toughradius exists")
def init_config(self):
testfile = os.path.join(os.path.abspath(os.path.dirname(__file__)),"test.json")
self.config = iconfig.find_config(testfile)
def admin_login(self):
req = requests.Session()
r = req.post(self.sub_path("/admin/login"),data=dict(username="admin",password="root"))
if r.status_code == 200:
rjson = r.json()
msg = rjson['msg']
if rjson['code'] == 0:
return req
else:
raise Exception(msg)
else:
r.raise_for_status()
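# Usage sketch: a concrete test would mix TestMixin into a test class; the
# class and method names below are illustrative:
# class TestAdminLogin(TestMixin):
#     def test_login(self):
#         self.init_config()
#         req = self.admin_login()
#         resp = req.get(self.sub_path("/admin"))
#         assert resp.status_code == 200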
| 27.6 | 95 | 0.575569 | 864 | 0.89441 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.161491 |
057b75bb649e28b716661271413ac2187e4d17f1
| 48 |
py
|
Python
|
game/data/components/__init__.py
|
UnidayStudio/Easy-2D-Game-Engine
|
1a8501cba538d7542b0e24bf64eead388085480f
|
[
"MIT"
] | 8 |
2019-12-15T22:32:30.000Z
|
2021-06-14T07:38:51.000Z
|
game/data/components/__init__.py
|
UnidayStudio/Easy-2D-Game-Engine
|
1a8501cba538d7542b0e24bf64eead388085480f
|
[
"MIT"
] | null | null | null |
game/data/components/__init__.py
|
UnidayStudio/Easy-2D-Game-Engine
|
1a8501cba538d7542b0e24bf64eead388085480f
|
[
"MIT"
] | 2 |
2020-09-10T17:34:23.000Z
|
2021-03-11T09:26:26.000Z
|
from game.data.components.TestComponent import *
| 48 | 48 | 0.854167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
057bdb050500b53da7e385ff2282c3ebb232fe64
| 121 |
py
|
Python
|
hcaptcha/__init__.py
|
yunusbyrak/py-hcaptcha
|
f429bfaba7619c2ac255ae101423d72c2866aa09
|
[
"MIT"
] | 1 |
2022-01-09T23:49:03.000Z
|
2022-01-09T23:49:03.000Z
|
hcaptcha/__init__.py
|
bryonpokemon/py-hcaptcha
|
92f723c8f5180d921731d7d04deb637099514a2e
|
[
"MIT"
] | null | null | null |
hcaptcha/__init__.py
|
bryonpokemon/py-hcaptcha
|
92f723c8f5180d921731d7d04deb637099514a2e
|
[
"MIT"
] | 1 |
2022-01-09T23:49:03.000Z
|
2022-01-09T23:49:03.000Z
|
from .challenges import Challenge
from .solvers import Solver
from .agents import random_agent
from .exceptions import *
| 24.2 | 33 | 0.826446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
057c321a1c38497a94f1e9f85d9de7c4b624cddb
| 10,869 |
py
|
Python
|
mscode/xp/general_comparison.py
|
cohenjer/mscode
|
e761c4af0227c386bdc7d22a55a2218486faf708
|
[
"MIT"
] | null | null | null |
mscode/xp/general_comparison.py
|
cohenjer/mscode
|
e761c4af0227c386bdc7d22a55a2218486faf708
|
[
"MIT"
] | null | null | null |
mscode/xp/general_comparison.py
|
cohenjer/mscode
|
e761c4af0227c386bdc7d22a55a2218486faf708
|
[
"MIT"
] | null | null | null |
# recovery (and error) vs noise for all algorithms
# recovery (and error) vs condB for all algorithms
# recovery vs (k,d) for all algorithms (heatmap)
# todo: also condD?
# Questions:
# - test two distributions for X: Gaussian, and decreasing
# - to choose lambda(s), we take the median best value over a few on-the-fly runs with the same settings. The grid is very coarse; in practice, use cross-validation.
# - We initialize with a single zero init, cf. init tests for more details
# Reasonable dimensions for reasonable runtime
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
import pandas as pd
from mscode.utils.utils import count_support, redundance_count, find_lambda
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix
from mscode.utils.generator import gen_mix, initialize
import plotly.express as px
# Random seeding
np.random.seed(seed=0)
# Problem parameters
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
distr = 'Uniform'
tol = 1e-6
# We run the tests several times since performances are very problem-dependent
Nbdata = 50
# Recovery and error versus noise
grid_SNR = [1000, 100, 50, 40, 30, 20, 15, 10, 5, 2, 0] #[40, 20]
grid_lambda = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# Store results in Pandas DataFrame
store_pd = pd.DataFrame(columns=["xp", "value", "algorithm", "error type", "SNR", "lambda", "k", "r", "d", "m", "n", "cond"])
for SNR in grid_SNR:
print('SNR', SNR, 'dB')
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr = distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP1'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus conditionning
SNR = 20
grid_cond = [1, 10, 50, 100, 5*1e2, 1e3, 5*1e3, 1e4, 5*1e4, 1e5]
for cond in grid_cond:
print('cond', cond)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
dic={
'xp':10*['XP2'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[np.round(condB,3)],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus (k,d)
cond = 5*1e2
grid_k = [1, 2, 5, 10, 20]
grid_d = [20, 50, 100, 200, 400]
for d in grid_d:
for k in grid_k:
print('(d,k) is', d, k)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP3'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
df1 = store_pd[store_pd.xp=='XP1']
df2 = store_pd[store_pd.xp=='XP2']
df3 = store_pd[store_pd.xp=='XP3']
fig = px.box(df1[df1['error type']=='support recovery'], x='SNR', y='value', facet_col='algorithm', color='algorithm', title="Support recovery versus SNR", labels={'value':'Support recovery'})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig.show()
fig2 = px.box(df2[df2['error type']=='support recovery'], x='cond', y='value', color='algorithm', facet_col='algorithm', title="Support recovery versus conditionning of B", labels={'value':'Support recovery'})
fig2.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig2.update_xaxes(type='category')
fig2.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig2.show()
# Heatmap of average support recovery scores
fig3=px.density_heatmap(df3[df3['error type']=='support recovery'], x='d', y='k', z='value', facet_col='algorithm', color_continuous_scale='Viridis', histfunc="avg", labels={'value':'Support recovery'}, title='Recovery for varying sparsity and dictionary size')
fig3.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig3.update_xaxes(type='category')
fig3.update_yaxes(type='category')
fig3.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=310,
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
)
fig3.show()
year = 2021
month = 10
day = 20
path = '../..'
stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XP1/{}_plot1.pdf'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.pdf'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.pdf'.format(path,stor_name))
# for Frontiers export
#fig.write_image('{}/data/XP1/{}_plot1.jpg'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.jpg'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.jpg'.format(path,stor_name))
# to load data
#store_pd = pd.read_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
| 44.004049 | 261 | 0.631153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,440 | 0.316496 |
057c9190ccad439b376e3bce3f11d837eb5a4576
| 42 |
py
|
Python
|
tests/test_modules/simple_test_package/aa.py
|
ajylee/call_map
|
21e7684b0814eae6f16cd4bc75597dc4e9239ec0
|
[
"BSD-2-Clause"
] | 20 |
2017-12-24T00:19:15.000Z
|
2021-11-15T07:42:25.000Z
|
tests/test_modules/simple_test_package/aa.py
|
ajylee/call_map
|
21e7684b0814eae6f16cd4bc75597dc4e9239ec0
|
[
"BSD-2-Clause"
] | 1 |
2017-10-22T21:03:41.000Z
|
2017-12-24T04:26:22.000Z
|
tests/test_modules/simple_test_package/aa.py
|
ajylee/call_map
|
21e7684b0814eae6f16cd4bc75597dc4e9239ec0
|
[
"BSD-2-Clause"
] | 2 |
2017-11-04T10:06:59.000Z
|
2019-08-01T22:24:49.000Z
|
from . import bb
def foo():
bb.bar()
| 8.4 | 16 | 0.547619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
057cd72af1308e0a81b1f8fd12ba9d1678f47b2d
| 1,262 |
py
|
Python
|
tests/fixtures.py
|
GustavoKatel/pushbullet-cli
|
e5102772752a97db539594b0d50b5effb36a22e2
|
[
"MIT"
] | 176 |
2017-01-30T16:21:48.000Z
|
2022-02-10T05:32:57.000Z
|
tests/fixtures.py
|
GustavoKatel/pushbullet-cli
|
e5102772752a97db539594b0d50b5effb36a22e2
|
[
"MIT"
] | 49 |
2017-01-21T20:27:03.000Z
|
2022-01-16T02:57:51.000Z
|
tests/fixtures.py
|
GustavoKatel/pushbullet-cli
|
e5102772752a97db539594b0d50b5effb36a22e2
|
[
"MIT"
] | 21 |
2017-01-26T06:08:54.000Z
|
2022-01-04T19:53:25.000Z
|
import click
import pytest
from click.testing import CliRunner
@pytest.fixture
def pb_api(mocker):
from pushbullet_cli import app
from tests.mock_pushbullet import MockPushBullet
mock_pb = MockPushBullet()
mocker.patch.object(app, "_get_pb", return_value=mock_pb)
yield mock_pb
@pytest.fixture
def runner(pb_api):
runner = CliRunner()
return runner
def wrap_runner_func(runner, func):
    def invoke(arg_list=(), should_raise=True, **kwargs):
        result = runner.invoke(func, list(arg_list), **kwargs)
if should_raise:
if result.exception is not None:
raise result.exception
assert result.exit_code == 0
return result
return invoke
@pytest.fixture
def push(runner):
from pushbullet_cli.app import push
return wrap_runner_func(runner, push)
@pytest.fixture
def list_devices(runner):
from pushbullet_cli.app import list_devices
return wrap_runner_func(runner, list_devices)
@pytest.fixture
def list_pushes(runner):
from pushbullet_cli.app import list_pushes
return wrap_runner_func(runner, list_pushes)
@pytest.fixture
def set_key(runner):
from pushbullet_cli.app import set_key
return wrap_runner_func(runner, set_key)
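# Example: tests consume the wrapped runners as plain callables; the CLI
# arguments below are illustrative, not taken from pushbullet-cli's docs:
# def test_push_smoke(push):
#     result = push(['hello world'])
#     assert result.exit_code == 0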
| 21.033333 | 61 | 0.723455 | 0 | 0 | 219 | 0.173534 | 837 | 0.663233 | 0 | 0 | 9 | 0.007132 |
057dcb0e3d38cc7460f6b046f1c4949c4d391cb9
| 2,478 |
py
|
Python
|
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | 1 |
2021-12-22T02:45:39.000Z
|
2021-12-22T02:45:39.000Z
|
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""Tests for hierarchical aggregator."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["ciaran-g"]
import pytest
from sktime.transformations.hierarchical.aggregate import Aggregator
from sktime.utils._testing.hierarchical import _bottom_hier_datagen
# test for equal output with with named/unnamed indexes
@pytest.mark.parametrize("flatten_single_levels", [True, False])
def test_aggregator_fit_transform_index(flatten_single_levels):
"""Tests fit_transform of aggregator function.
    This test asserts that the output of Aggregator using fit_transform() with a
named multiindex is equal to an unnamed one. It also tests that
Aggregator does not change the names of the input index in both cases.
"""
agg = Aggregator(flatten_single_levels=flatten_single_levels)
X = _bottom_hier_datagen(
no_bottom_nodes=3,
no_levels=1,
)
# named indexes
X_agg = agg.fit_transform(X)
msg = "Aggregator returns wrong index names."
assert X_agg.index.names == X.index.names, msg
# unnamed indexes
X.index.rename([None] * X.index.nlevels, inplace=True)
X_agg_unnamed = agg.fit_transform(X)
assert X_agg_unnamed.index.names == X.index.names, msg
msg = "Aggregator returns different output for named and unnamed indexes."
assert X_agg.equals(X_agg_unnamed), msg
# test that flatten_single_levels works as expected
def test_aggregator_flatten():
"""Tests Aggregator flattening single levels.
This tests that the flatten_single_levels argument works as expected for a
fixed example of a complicated hierarchy.
"""
agg = Aggregator(flatten_single_levels=False)
agg_flat = Aggregator(flatten_single_levels=True)
X = _bottom_hier_datagen(
no_bottom_nodes=10,
no_levels=4,
random_seed=111,
)
# aggregate without flattening
X_agg = agg.fit_transform(X)
# aggregate with flattening
X_agg_flat = agg_flat.fit_transform(X)
msg = (
"Aggregator without flattening should have 21 unique levels, "
"with the time index removed, for random_seed=111."
)
assert len(X_agg.droplevel(-1).index.unique()) == 21, msg
msg = (
"Aggregator with flattening should have 17 unique levels, "
"with the time index removed, for random_seed=111."
)
assert len(X_agg_flat.droplevel(-1).index.unique()) == 17, msg
| 33.486486 | 80 | 0.717514 | 0 | 0 | 0 | 0 | 1,023 | 0.412833 | 0 | 0 | 1,175 | 0.474173 |
057e82bc7eee8bfd854f64e90c47dfe5089a763d
| 563 |
py
|
Python
|
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | null | null | null |
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | 49 |
2021-03-16T14:58:18.000Z
|
2022-03-14T22:06:36.000Z
|
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | null | null | null |
from flask.testing import FlaskClient
from doni.tests.unit import utils
def test_list_availability_windows(
mocker, user_auth_headers, client: "FlaskClient", database: "utils.DBFixtures"
):
mock_authorize = mocker.patch("doni.api.availability_window.authorize")
hw = database.add_hardware()
res = client.get(
f"/v1/hardware/{hw['uuid']}/availability", headers=user_auth_headers
)
assert res.status_code == 200
assert res.json == {
"availability": [],
}
    mock_authorize.assert_called_once_with("hardware:get")
| 29.631579 | 82 | 0.708703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.248668 |
057ec8e5e224d55258d512334e2a91039899ab2c
| 747 |
py
|
Python
|
src/genui/generators/serializers.py
|
Tontolda/genui
|
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
|
[
"0BSD"
] | 15 |
2021-05-31T13:39:17.000Z
|
2022-03-30T12:04:14.000Z
|
src/genui/generators/serializers.py
|
martin-sicho/genui
|
ea7f1272030a13e8e253a7a9b6479ac6a78552d3
|
[
"MIT"
] | 3 |
2021-04-08T22:02:22.000Z
|
2022-03-16T09:10:20.000Z
|
src/genui/generators/serializers.py
|
Tontolda/genui
|
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
|
[
"0BSD"
] | 5 |
2021-03-04T11:00:54.000Z
|
2021-12-18T22:59:22.000Z
|
"""
serializers
Created by: Martin Sicho
On: 27-01-20, 17:00
"""
from rest_framework import serializers
from genui.utils.serializers import GenericModelSerializerMixIn
from genui.compounds.serializers import MolSetSerializer
from genui.projects.serializers import ProjectSerializer
from . import models
class GeneratorSerializer(GenericModelSerializerMixIn, serializers.HyperlinkedModelSerializer):
className = GenericModelSerializerMixIn.className
extraArgs = GenericModelSerializerMixIn.extraArgs
project = ProjectSerializer(many=False)
compounds = MolSetSerializer(many=True)
class Meta:
model = models.Generator
fields = ('id', 'name', 'description', 'project', 'compounds', 'className', 'extraArgs')
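# Usage sketch: hyperlinked serializers need the request in their context to
# build URLs; `generator` and `request` stand in for real objects:
# serializer = GeneratorSerializer(generator, context={'request': request})
# data = serializer.data  # nested 'project' and 'compounds' are included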
| 29.88 | 96 | 0.781794 | 438 | 0.586345 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.174029 |
057f8e845bc31c86789aa18cb713245d93a393bc
| 5,898 |
py
|
Python
|
cccbr_methods/models.py
|
lelandpaul/cccbr_methods
|
8fce303d7d7fd178f1b371389a4cc318852e392a
|
[
"MIT"
] | null | null | null |
cccbr_methods/models.py
|
lelandpaul/cccbr_methods
|
8fce303d7d7fd178f1b371389a4cc318852e392a
|
[
"MIT"
] | 1 |
2021-12-13T20:44:46.000Z
|
2021-12-13T20:44:46.000Z
|
cccbr_methods/models.py
|
lelandpaul/cccbr_methods
|
8fce303d7d7fd178f1b371389a4cc318852e392a
|
[
"MIT"
] | null | null | null |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from datetime import datetime, timedelta
from sqlalchemy import Table, Column, Integer, String, Date, ForeignKey, Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from re import sub
import os
module_path = '/'.join(__file__.split('/')[:-1])
# SQLAlchemy Setup
Base = declarative_base()
engine = create_engine('sqlite:///{}/data/methods.db?check_same_thread=False'.format(module_path))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
class Method(Base):
__tablename__ = 'methods'
id = Column(Integer, primary_key=True) # method['id'] (formatted)
stage = Column(Integer) # mset.properties.stage
classification = Column(String(32)) # mset.properties.classification.string
plain = Column(Boolean, default=False) # mset.properties.classification['plain']
trebledodging = Column(Boolean, default=False) # mset.properties.classification['trebledodging']
little = Column(Boolean, default=False) # mset.properties.classification['little']
differential = Column(Boolean, default=False) # mset.properties.classification['differential']
lengthoflead = Column(Integer) # mset.properties.lengthoflead
numberofhunts = Column(Integer) # mset.properties.numberofhunts
huntbellpath = Column(String(32)) # mset.properties.huntbellpath
methodset_notes = Column(String(128)) # mset.properties.notes
title = Column(String(128), index=True, unique=True) # method.title
name = Column(String(128), index=True) # method.name
leadhead = Column(String(32)) # method.leadhead
leadheadcode = Column(String(32)) # method.leadheadcode
symmetry = Column(String(32)) # method.symmetry
notation = Column(String(128)) # method.notation
falseness = Column(String(32)) # method.falseness.fchgroups
extensionconstruction = Column(String(32)) # method.extensionconstruction
notes = Column(String(128)) # method.notes
pmmref = Column(String(32)) # method.references.pmmref
bnref = Column(String(32)) # method.references.bnref
cbref = Column(String(32)) # method.references.cbref
rwref = Column(String(32)) # method.references.rwref
tdmmref = Column(String(32)) # method.references.tdmmref
performances = relationship("Performance", back_populates="method")
@staticmethod
def get(search_string='', *args, **kwargs):
"""
Search for a method in the database and return the first result.
"""
# If there's an exact match for the search_string, we want to return that
# but we still want to respect the other search terms
exact = session.query(Method).filter_by(title=search_string, **kwargs).first()
if exact:
return exact
query = session.query(Method).filter(Method.title.like('%' + search_string + '%'))
return query.filter_by(**kwargs).first()
@staticmethod
def search(search_string='', *args, **kwargs):
"""
Search for a method in the database and return all results.
"""
query = session.query(Method).filter(Method.title.like('%' + search_string + '%'))
return query.filter_by(**kwargs).all()
@staticmethod
def query():
return session.query(Method)
@property
def full_notation(self):
        if ',' not in self.notation:
            return self.notation
segments = [seg.split('.') for seg in sub('-','.-.',self.notation).strip('.').split(',')]
full_notation = ['.'.join(seg + seg[:-1][::-1]) if len(seg) > 1 else seg[0] for seg in segments]
return '.'.join(full_notation)
@property
def full_notation_list(self):
return self.full_notation.split('.')
def __repr__(self):
return '<Method {}>'.format(self.title)
def __iter__(self):
for key, val in self.__dict__.items():
if key == '_sa_instance_state': continue
yield (key, val)
class Performance(Base):
__tablename__ = 'performances'
id = Column(Integer, primary_key=True, autoincrement=True) # id
kind = Column(String(32)) # method.performances.KIND
date = Column(Date) # PERF.date
society = Column(String(32)) # PERF.society
town = Column(String(32)) # PERF.location.town
county = Column(String(32)) # PERF.location.county
building = Column(String(32)) # PERF.location.building
address = Column(String(32)) # PERF.location.address
country = Column(String(32)) # PERF.location.country
room = Column(String(32)) # PERF.location.room
region = Column(String(32)) # PERF.location.region
method_id_fk = Column(Integer, ForeignKey('methods.id'))
method = relationship("Method", back_populates="performances")
def __repr__(self):
return '<Performance {}: {}>'.format(self.kind, self.method.title)
def __iter__(self):
for key, val in self.__dict__.items():
if key == '_sa_instance_state': continue
if key == 'method': continue
yield (key, val)
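# Usage sketch, assuming the bundled data/methods.db is in place; the search
# strings and keyword filters are only illustrative:
if __name__ == '__main__':
    m = Method.get('Cambridge Surprise Major')  # first fuzzy title match
    if m is not None:
        print(m.title, m.stage, m.full_notation)
    for hit in Method.search('Bob', stage=6)[:3]:  # kwargs become column filters
        print(hit)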
| 45.369231 | 106 | 0.590709 | 5,274 | 0.894201 | 337 | 0.057138 | 1,364 | 0.231265 | 0 | 0 | 1,471 | 0.249407 |
057fec44c986714a8f02d47b39f9f891463a6252
| 848 |
py
|
Python
|
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
import math
class Solution:
@staticmethod
    def number_of_factor(n):
        count = 0
        if n == 1:
            return 1
        for i in range(1, math.ceil(math.sqrt(n))):
            if n % i == 0:
                count += 2
        if math.ceil(math.sqrt(n)) == math.floor(math.sqrt(n)):
            count += 1
        return count
test = Solution
triangle_arr = [0]
temp, box, curr_num = 0, 0, 0
for i in range(1, 1001):
while temp <= i:
box += 1
        curr_num = (box * (box + 1)) // 2  # triangle numbers are integers; avoid float drift
temp = test.number_of_factor(curr_num)
triangle_arr.append(curr_num)
print(curr_num)
# number_test = int(input())
#
# limit_list = []
# for a in range(number_test):
# limit_list.append(int(input()))
#
# for limit in limit_list:
# print(int(triangle_arr[limit]))
| 20.190476 | 69 | 0.5625 | 355 | 0.418632 | 0 | 0 | 335 | 0.395047 | 0 | 0 | 195 | 0.229953 |
05803580ad5cf536a86b26fbe2b79573b774b99b
| 9,253 |
py
|
Python
|
swyft/plot/plot.py
|
undark-lab/swyft
|
50aa524e2f3a2b3d1354543178ff72bc7f055a35
|
[
"MIT"
] | 104 |
2020-11-26T09:46:03.000Z
|
2022-03-18T06:22:03.000Z
|
swyft/plot/plot.py
|
cweniger/swyft
|
2c0ed514622a37e8ec4e406b99a8327ecafb7ab4
|
[
"MIT"
] | 83 |
2021-03-02T15:54:26.000Z
|
2022-03-10T08:09:05.000Z
|
swyft/plot/plot.py
|
undark-lab/swyft
|
50aa524e2f3a2b3d1354543178ff72bc7f055a35
|
[
"MIT"
] | 10 |
2021-02-04T14:27:36.000Z
|
2022-03-31T17:39:34.000Z
|
import numpy as np
import pylab as plt
from scipy.integrate import simps
def grid_interpolate_samples(x, y, bins=1000, return_norm=False):
idx = np.argsort(x)
x, y = x[idx], y[idx]
x_grid = np.linspace(x[0], x[-1], bins)
y_grid = np.interp(x_grid, x, y)
norm = simps(y_grid, x_grid)
y_grid_normed = y_grid / norm
if return_norm:
return x_grid, y_grid_normed, norm
else:
return x_grid, y_grid_normed
def get_HDI_thresholds(x, cred_level=[0.68268, 0.95450, 0.99730]):
x = x.flatten()
x = np.sort(x)[::-1] # Sort backwards
total_mass = x.sum()
enclosed_mass = np.cumsum(x)
idx = [np.argmax(enclosed_mass >= total_mass * f) for f in cred_level]
levels = np.array(x[idx])
return levels
def plot_posterior(
samples,
pois,
weights_key=None,
ax=plt,
grid_interpolate=False,
bins=100,
color="k",
contours=True,
**kwargs
):
if isinstance(pois, int):
pois = (pois,)
w = None
# FIXME: Clean up ad hoc code
if weights_key is None:
weights_key = tuple(sorted(pois))
try:
w = samples["weights"][tuple(weights_key)]
except KeyError:
if len(weights_key) == 1:
for k in samples["weights"].keys():
if weights_key[0] in k:
weights_key = k
break
w = samples["weights"][tuple(weights_key)]
elif len(weights_key) == 2:
for k in samples["weights"].keys():
if set(weights_key).issubset(k):
weights_key = k
w = samples["weights"][k]
if w is None:
return
if len(pois) == 1:
x = samples["v"][:, pois[0]]
if grid_interpolate:
# Grid interpolate samples
log_prior = samples["log_priors"][pois[0]]
w_eff = np.exp(np.log(w) + log_prior) # p(z|x) = r(x, z) p(z)
zm, v = grid_interpolate_samples(x, w_eff)
else:
v, e = np.histogram(x, weights=w, bins=bins, density=True)
zm = (e[1:] + e[:-1]) / 2
levels = sorted(get_HDI_thresholds(v))
if contours:
contour1d(zm, v, levels, ax=ax, color=color)
ax.plot(zm, v, color=color, **kwargs)
ax.set_xlim([x.min(), x.max()])
ax.set_ylim([-v.max() * 0.05, v.max() * 1.1])
# Diagnostics
mean = sum(w * x) / sum(w)
mode = zm[v == v.max()][0]
int2 = zm[v > levels[2]].min(), zm[v > levels[2]].max()
int1 = zm[v > levels[1]].min(), zm[v > levels[1]].max()
int0 = zm[v > levels[0]].min(), zm[v > levels[0]].max()
entropy = -simps(v * np.log(v), zm)
return dict(
mean=mean, mode=mode, HDI1=int2, HDI2=int1, HDI3=int0, entropy=entropy
)
elif len(pois) == 2:
# FIXME: use interpolation when grid_interpolate == True
x = samples["v"][:, pois[0]]
y = samples["v"][:, pois[1]]
counts, xbins, ybins, _ = ax.hist2d(x, y, weights=w, bins=bins, cmap="gray_r")
levels = sorted(get_HDI_thresholds(counts))
try:
ax.contour(
counts.T,
extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()],
levels=levels,
linestyles=[":", "--", "-"],
colors=color,
)
except ValueError:
print("WARNING: 2-dim contours not well-defined.")
ax.set_xlim([x.min(), x.max()])
ax.set_ylim([y.min(), y.max()])
xm = (xbins[:-1] + xbins[1:]) / 2
ym = (ybins[:-1] + ybins[1:]) / 2
cx = counts.sum(axis=1)
cy = counts.sum(axis=0)
mean = (sum(xm * cx) / sum(cx), sum(ym * cy) / sum(cy))
return dict(mean=mean, mode=None, HDI1=None, HDI2=None, HDI3=None, entropy=None)
def plot_1d(
samples,
pois,
truth=None,
bins=100,
figsize=(15, 10),
color="k",
labels=None,
label_args={},
ncol=None,
subplots_kwargs={},
fig=None,
contours=True,
) -> None:
"""Make beautiful 1-dim posteriors.
Args:
samples: Samples from `swyft.Posteriors.sample`
pois: List of parameters of interest
truth: Ground truth vector
bins: Number of bins used for histograms.
figsize: Size of figure
color: Color
labels: Custom labels (default is parameter names)
label_args: Custom label arguments
ncol: Number of panel columns
        subplots_kwargs: Subplot kwargs
"""
grid_interpolate = False
diags = {}
if ncol is None:
ncol = len(pois)
K = len(pois)
nrow = (K - 1) // ncol + 1
if fig is None:
fig, axes = plt.subplots(nrow, ncol, figsize=figsize, **subplots_kwargs)
else:
axes = fig.get_axes()
lb = 0.125
tr = 0.9
whspace = 0.15
fig.subplots_adjust(
left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
)
if labels is None:
labels = [samples["parameter_names"][pois[i]] for i in range(K)]
for k in range(K):
if nrow == 1 and ncol > 1:
ax = axes[k]
elif nrow == 1 and ncol == 1:
ax = axes
else:
i, j = k % ncol, k // ncol
ax = axes[j, i]
ret = plot_posterior(
samples,
pois[k],
ax=ax,
grid_interpolate=grid_interpolate,
color=color,
bins=bins,
contours=contours,
)
ax.set_xlabel(labels[k], **label_args)
if truth is not None:
ax.axvline(truth[pois[k]], ls=":", color="r")
diags[(pois[k],)] = ret
return fig, diags
def plot_corner(
samples,
pois,
bins=100,
truth=None,
figsize=(10, 10),
color="k",
labels=None,
label_args={},
contours_1d: bool = True,
fig=None,
) -> None:
"""Make a beautiful corner plot.
Args:
samples: Samples from `swyft.Posteriors.sample`
pois: List of parameters of interest
truth: Ground truth vector
bins: Number of bins used for histograms.
figsize: Size of figure
color: Color
labels: Custom labels (default is parameter names)
label_args: Custom label arguments
contours_1d: Plot 1-dim contours
fig: Figure instance
"""
K = len(pois)
if fig is None:
fig, axes = plt.subplots(K, K, figsize=figsize)
else:
axes = np.array(fig.get_axes()).reshape((K, K))
lb = 0.125
tr = 0.9
whspace = 0.1
fig.subplots_adjust(
left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
)
diagnostics = {}
if labels is None:
labels = [samples["parameter_names"][pois[i]] for i in range(K)]
for i in range(K):
for j in range(K):
ax = axes[i, j]
# Switch off upper left triangle
if i < j:
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_frame_on(False)
continue
# Formatting labels
if j > 0 or i == 0:
ax.set_yticklabels([])
# ax.set_yticks([])
if i < K - 1:
ax.set_xticklabels([])
# ax.set_xticks([])
if i == K - 1:
ax.set_xlabel(labels[j], **label_args)
if j == 0 and i > 0:
ax.set_ylabel(labels[i], **label_args)
# Set limits
# ax.set_xlim(x_lims[j])
# if i != j:
# ax.set_ylim(y_lims[i])
# 2-dim plots
if j < i:
ret = plot_posterior(
samples, [pois[j], pois[i]], ax=ax, color=color, bins=bins
)
if truth is not None:
ax.axvline(truth[pois[j]], color="r")
ax.axhline(truth[pois[i]], color="r")
diagnostics[(pois[j], pois[i])] = ret
if j == i:
ret = plot_posterior(
samples,
pois[i],
ax=ax,
color=color,
bins=bins,
contours=contours_1d,
)
if truth is not None:
ax.axvline(truth[pois[i]], ls=":", color="r")
diagnostics[(pois[i],)] = ret
return fig, diagnostics
def contour1d(z, v, levels, ax=plt, linestyles=None, color=None, **kwargs):
y0 = -1.0 * v.max()
y1 = 5.0 * v.max()
ax.fill_between(z, y0, y1, where=v > levels[0], color=color, alpha=0.1)
ax.fill_between(z, y0, y1, where=v > levels[1], color=color, alpha=0.1)
ax.fill_between(z, y0, y1, where=v > levels[2], color=color, alpha=0.1)
# if not isinstance(colors, list):
# colors = [colors]*len(levels)
# for i, l in enumerate(levels):
# zero_crossings = np.where(np.diff(np.sign(v-l*1.001)))[0]
# for c in z[zero_crossings]:
# ax.axvline(c, ls=linestyles[i], color = colors[i], **kwargs)
if __name__ == "__main__":
pass
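# Hedged demo: builds a synthetic `samples` dict with the keys plot_posterior
# reads ("v", "weights", "log_priors", "parameter_names"); the values are
# random stand-ins, not real swyft posterior output.
def _synthetic_demo(n=5000):
    rng = np.random.default_rng(0)
    samples = {
        "v": rng.normal(size=(n, 2)),
        "weights": {(0,): np.ones(n), (1,): np.ones(n), (0, 1): np.ones(n)},
        "log_priors": [np.zeros(n), np.zeros(n)],
        "parameter_names": ["a", "b"],
    }
    return plot_corner(samples, pois=[0, 1])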
| 29.848387 | 88 | 0.514428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,735 | 0.187507 |
05819bbe1c0902e6600dadc33453e92046d7a1ff
| 3,038 |
py
|
Python
|
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | 1 |
2021-07-13T18:43:59.000Z
|
2021-07-13T18:43:59.000Z
|
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | null | null | null |
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | null | null | null |
import os
from tabulate import tabulate
import requests
def iniciar():
    os.system('cls')  # clears the console (Windows-specific)
while True:
print('Seleccione una opción: ')
print('\t1. Registrar movimiento')
print('\t2. Ver todos los movimientos')
print('\t3. Buscar un movimiento')
print('\t4. Modificar un movimiento')
print('\t5. Eliminar un movimiento')
print('\t6. Salir')
opcion = input('Ingrese una opción: ')
if opcion == '1':
nuevo_movimiento()
elif opcion == '2':
mostrar_movimientos()
elif opcion == '3':
buscar_movimiento()
elif opcion == '4':
modificar_movimiento()
elif opcion == '5':
eliminar_movimiento()
elif opcion == '6':
break
else:
print('Escoja una opción correcta')
def nuevo_movimiento():
tipo = input('Ingrese el tipo de movimiento \n- Ingreso\n- Gasto\n')
cantidad = input('Ingrese la cantidad: ')
fecha = input('Ingrese la fecha: ')
data = {'tipo': tipo, 'cantidad': cantidad, 'fecha': fecha}
respuesta = requests.post(
url='http://localhost:3000/movimientos/registrar', data=data)
print(respuesta.text)
def mostrar_movimientos():
response = requests.get(url='http://localhost:3000/movimientos/todos')
datos = []
for dato in response.json():
temp = []
for key, value in dato.items():
temp.append(value)
datos.append(temp)
headers = ['ID', 'TIPO DE MOVIMIENTO', 'CANTIDAD', 'FECHA']
tabla = tabulate(datos, headers, tablefmt='fancy_grid')
print(tabla)
def buscar_movimiento():
id = input('Ingrese el id del movimiento a buscar: ')
response = requests.get(url='http://localhost:3000/movimientos/buscar/'+id)
datos = []
for dato in response.json():
temp = []
for key, value in dato.items():
temp.append(value)
datos.append(temp)
headers = ['ID', 'TIPO DE MOVIMIENTO', 'CANTIDAD', 'FECHA']
tabla = tabulate(datos, headers, tablefmt='fancy_grid')
print(tabla)
def modificar_movimiento():
id = input('Ingrese el id del movimiento a modificar: ')
    campo = input(
        'Ingrese el campo a modificar:\n1. Tipo\n2. Cantidad\n3. Fecha\n')
nuevo_valor = ''
if(campo == '1'):
campo = 'tipo'
nuevo_valor = input('Ingrese el tipo de movimiento: ')
elif(campo == '2'):
campo = 'cantidad'
nuevo_valor = input('Ingrese la cantidad: ')
elif(campo == '3'):
campo = 'fecha'
nuevo_valor = input('Ingrese la fecha: ')
data = {'campo': campo, 'nuevo_valor': nuevo_valor}
respuesta = requests.post(
url='http://localhost:3000/movimientos/modificar/'+id, data=data)
print(respuesta.text)
def eliminar_movimiento():
    id = input('Ingrese el id del movimiento a eliminar: ')
respuesta = requests.post(
url='http://localhost:3000/movimientos/eliminar/'+id)
print(respuesta.text)
iniciar()
| 31 | 79 | 0.600066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.336731 |
05826df3789ad47bc005b4bcd34765514c7e2fd2
| 409 |
py
|
Python
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 31 |
2020-05-02T13:34:26.000Z
|
2021-06-06T17:25:52.000Z
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 108 |
2019-11-18T19:41:52.000Z
|
2022-03-18T13:58:17.000Z
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 4 |
2020-05-19T08:57:44.000Z
|
2020-09-21T08:53:46.000Z
|
"""Depth-first traversing of a binary tree.
Call a function _f on every node of binary tree _bt, in depth-first infix order
Source: programming-idioms.org
"""
# Implementation author: TinyFawks
# Created on 2016-02-18T08:50:27.130406Z
# Last modified on 2016-02-18T09:16:52.625429Z
# Version 2
# Recursive DFS.
def dfs(bt):
if bt is None:
return
dfs(bt.left)
f(bt)
dfs(bt.right)
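# A minimal, self-contained way to drive `dfs` (the `Node` class and the visitor
# `f` below are illustrative assumptions, not part of the original idiom):
if __name__ == "__main__":
    class Node:
        def __init__(self, value, left=None, right=None):
            self.value = value
            self.left = left
            self.right = right

    def f(node):
        print(node.value)

    dfs(Node(2, Node(1), Node(3)))  # prints 1, 2, 3 (depth-first infix order)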
| 18.590909 | 79 | 0.694377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.750611 |
0582a1028ca60869856e20d167bdffc0aa95e128
| 412 |
py
|
Python
|
pal95_doc/docs/__init__.py
|
MacHu-GWU/pal95_doc-project
|
753b865435f316e985320247489e68f465741827
|
[
"MIT"
] | 13 |
2019-10-01T02:51:27.000Z
|
2022-02-28T17:38:58.000Z
|
pal95_doc/docs/__init__.py
|
MacHu-GWU/pal95_doc-project
|
753b865435f316e985320247489e68f465741827
|
[
"MIT"
] | 2 |
2020-11-09T09:17:21.000Z
|
2021-04-27T21:20:59.000Z
|
pal95_doc/docs/__init__.py
|
MacHu-GWU/pal95_doc-project
|
753b865435f316e985320247489e68f465741827
|
[
"MIT"
] | 1 |
2020-02-28T12:05:22.000Z
|
2020-02-28T12:05:22.000Z
|
# -*- coding: utf-8 -*-
from .equipment import lt_equipment
from .spell import lt_spell_lxy, lt_spell_zle, lt_spell_lyr, lt_spell_an
from .monster import lt_monster
from .zone import lt_zone
doc_data = dict(
lt_equipment=lt_equipment,
lt_spell_lxy=lt_spell_lxy,
lt_spell_zle=lt_spell_zle,
lt_spell_lyr=lt_spell_lyr,
lt_spell_an=lt_spell_an,
lt_monster=lt_monster,
lt_zone=lt_zone,
)
| 25.75 | 72 | 0.764563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.055825 |
0582c3422fbd8d71835125e19cb23d6667d70ef1
| 3,157 |
py
|
Python
|
nexrad/nexrad_tutorial.py
|
uva-hydroinformatics-lab/precipitation_processing
|
54ef1673900b6bb2ee38daec3aac33748a8402cd
|
[
"MIT"
] | 1 |
2019-01-08T03:57:49.000Z
|
2019-01-08T03:57:49.000Z
|
nexrad/nexrad_tutorial.py
|
uva-hydroinformatics/precipitation_processing
|
54ef1673900b6bb2ee38daec3aac33748a8402cd
|
[
"MIT"
] | null | null | null |
nexrad/nexrad_tutorial.py
|
uva-hydroinformatics/precipitation_processing
|
54ef1673900b6bb2ee38daec3aac33748a8402cd
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy.ma as ma
import numpy as np
import pyart.graph
import tempfile
import pyart.io
import boto
s3conn = boto.connect_s3("AKIAISFFH4JXWC2HYFSA","9Az+XWYP9cbL3Sh641z/tbMuC1CSpjPjQTFkHj8D")
bucket = s3conn.get_bucket('noaa-nexrad-level2')
s3key = bucket.get_key('2015/05/15/KVWX/KVWX20150515_080737_V06.gz')
print(s3key)
#localfile = tempfile.NamedTemporaryFile(mode='r')
localfile = open("sample_nexrad_data", "wb")
localfile.close()  # only the path is needed; boto writes the object to it by name
s3key.get_contents_to_filename(localfile.name)
radar = pyart.io.read_nexrad_archive(localfile.name)
# display the lowest elevation scan data
display = pyart.graph.RadarDisplay(radar)
fig = plt.figure(figsize=(9, 12))
plots = [
# variable-name in pyart, display-name that we want, sweep-number of radar (0=lowest ref, 1=lowest velocity)
['reflectivity', 'Reflectivity (dBZ)', 0],
['differential_reflectivity', 'Zdr (dB)', 0],
['differential_phase', 'Phi_DP (deg)', 0],
['cross_correlation_ratio', 'Rho_HV', 0],
['velocity', 'Velocity (m/s)', 1],
['spectrum_width', 'Spectrum Width', 1]
]
def plot_radar_images(plots):
ncols = 2
    nrows = len(plots) // 2
for plotno, plot in enumerate(plots, start=1):
ax = fig.add_subplot(nrows, ncols, plotno)
display.plot(plot[0], plot[2], ax=ax, title=plot[1],
colorbar_label='',
axislabels=('East-West distance from radar (km)' if plotno == 6 else '',
'North-South distance from radar (km)' if plotno == 1 else ''))
display.set_limits((-300, 300), (-300, 300), ax=ax)
display.set_aspect_ratio('equal', ax=ax)
display.plot_range_rings(range(100, 350, 100), lw=0.5, col='black', ax=ax)
plt.show()
plot_radar_images(plots)
refl_grid = radar.get_field(0, 'reflectivity')
print(refl_grid[0])
rhohv_grid = radar.get_field(0, 'cross_correlation_ratio')
zdr_grid = radar.get_field(0, 'differential_reflectivity')
# apply rudimentary quality control: flag gates that are unlikely to be weather
reflow = np.less(refl_grid, 20)  # weak reflectivity (< 20 dBZ)
zdrhigh = np.greater(np.abs(zdr_grid), 2.3)  # extreme differential reflectivity
rhohvlow = np.less(rhohv_grid, 0.95)  # low cross-correlation ratio
notweather = np.logical_or(reflow, np.logical_or(zdrhigh, rhohvlow))
print(notweather[0])
qcrefl_grid = ma.masked_where(notweather, refl_grid)
print(qcrefl_grid[0])
qced = radar.extract_sweeps([0])
qced.add_field_like('reflectivity', 'reflectivityqc', qcrefl_grid)
display = pyart.graph.RadarDisplay(qced)
fig = plt.figure(figsize=(10, 5))
plots = [
# variable-name in pyart, display-name that we want, sweep-number of radar (0=lowest ref, 1=lowest velocity)
['reflectivity', 'Reflectivity (dBZ)', 0],
['reflectivityqc', 'QCed Reflectivity (dBZ)', 0],
]
for plotno, plot in enumerate(plots, start=1):
ax = fig.add_subplot(1, 2, plotno)
display.plot(plot[0], plot[2], ax=ax, title=plot[1],
colorbar_label='',
axislabels=('East-West distance from radar (km)' if plotno == 2 else '',
'North-South distance from radar (km)' if plotno == 1 else ''))
display.set_limits((-300, 300), (-300, 300), ax=ax)
plt.show()
| 36.287356 | 113 | 0.667089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,038 | 0.328793 |
05830297f5e87cadfedcaa83499c7c9b2affb118
| 3,746 |
py
|
Python
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
barbosamp/automacao-api-rest-jornada-learning
|
9ceb57bc6f4d845c35a149d760775c10c3a38614
|
[
"MIT"
] | 2 |
2020-11-20T18:40:32.000Z
|
2021-04-20T23:13:13.000Z
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
barbosamp/automacao-api-rest-jornada-learning
|
9ceb57bc6f4d845c35a149d760775c10c3a38614
|
[
"MIT"
] | 1 |
2020-10-22T16:16:40.000Z
|
2020-10-22T16:16:40.000Z
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
kpedron/automacao-api-rest-jornada-learning
|
50ceaf9f43b03383cc65e92460b6b9a398a88e02
|
[
"MIT"
] | 2 |
2020-10-16T02:37:20.000Z
|
2020-10-31T13:54:46.000Z
|
import unittest
import requests
import json
import pytest
BASE_URL = "https://api.serverest.dev"
class Carts(unittest.TestCase):
def setUp(self):
# Do authentication
# Cart endpoint requires authentication
full_url = BASE_URL + "/login"
body = {
"email": "[email protected]",
"password": "teste"
}
response = requests.post(url=full_url, json=body)
if response.status_code != 200:
            pytest.fail("Failed to get the authorization token\n", False)
response_json = json.loads(response.text)
self.token = response_json["authorization"]
def test_get_all_cart(self):
full_url = BASE_URL + "/carrinhos"
# Send HTTP Request
response = requests.get(url=full_url)
# Check the response from ServeRest
self.assertEqual(response.status_code, 200, "Error in status code to get all carts")
def test_create_cart_to_user(self):
full_url = BASE_URL + "/carrinhos"
body = {
"produtos": [
{
"idProduto": "K6leHdftCeOJj8BJ",
"quantidade": 2
}
]
}
header = {"Authorization": self.token}
# Send HTTP Request
response = requests.post(url=full_url, headers=header, json=body)
# Check the response from ServeRest
self.assertEqual(response.status_code, 201, "Error in status code to create a cart")
response_json = json.loads(response.text)
self.assertEqual(response_json["message"], "Cadastro realizado com sucesso")
# Now we will delete the cart (this is a good practice)
# Buy the item will delete the cart automatically
full_url = BASE_URL + "/carrinhos/concluir-compra"
# The endpoint delete the cart using the Authorization token from the user
response = requests.delete(url=full_url, headers=header)
self.assertEqual(response.status_code, 200, "Error in status code to delete a cart")
def test_get_cart_from_specific_user(self):
full_url = BASE_URL + "/carrinhos"
query = {"idUsuario": "K6leHdftCeOJj8BJ"}
# Send HTTP Request
response = requests.get(url=full_url, params=query)
self.assertEqual(response.status_code, 200, "Error in status code to get a cart")
def test_create_cart_without_authentication(self):
full_url = BASE_URL + "/carrinhos"
body = {
"produtos": [
{
"idProduto": "K6leHdftCeOJj8BJ",
"quantidade": 2
}
]
}
# Send HTTP Request
response = requests.post(url=full_url, json=body)
# Check the response from ServeRest
self.assertEqual(response.status_code, 401)
response_json = json.loads(response.text)
self.assertEqual(response_json["message"], "Token de acesso ausente, inválido, expirado ou usuário "
"do token não existe mais")
def test_create_cart_unknown_product(self):
full_url = BASE_URL + "/carrinhos"
body = {
"produtos": [
{
"idProduto": "234",
"quantidade": 4
}
]
}
header = {"Authorization": self.token}
# Send HTTP Request
response = requests.post(url=full_url, headers=header, json=body)
# Check the response from ServeRest
self.assertEqual(response.status_code, 400)
response_json = json.loads(response.text)
self.assertEqual(response_json["message"], "Produto não encontrado")
| 32.017094 | 108 | 0.591564 | 3,648 | 0.9728 | 0 | 0 | 0 | 0 | 0 | 0 | 1,214 | 0.323733 |
05836efbaef8a6e021845f469c0a620d95e4b977
| 372 |
py
|
Python
|
MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py
|
alex-lake29/MotorTorpedoQuotePT-109
|
012d45e8a329022492acad86e6693abf0ba5b7d2
|
[
"MIT"
] | null | null | null |
MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py
|
alex-lake29/MotorTorpedoQuotePT-109
|
012d45e8a329022492acad86e6693abf0ba5b7d2
|
[
"MIT"
] | null | null | null |
MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py
|
alex-lake29/MotorTorpedoQuotePT-109
|
012d45e8a329022492acad86e6693abf0ba5b7d2
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2022-03-21 19:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('QuotePT109', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='page',
name='likes',
field=models.IntegerField(default=0),
),
]
| 19.578947 | 49 | 0.583333 | 279 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.231183 |
0585d3beb2756c9c282cc3b3a1b2f3b72dff380a
| 474 |
py
|
Python
|
message_sender/migrations/0003_auto_20161124_1357.py
|
praekeltfoundation/seed-message-sender
|
d90ef4dc9fa248df97ca97f07569c6c70afcd1bd
|
[
"BSD-3-Clause"
] | 1 |
2017-01-03T08:53:18.000Z
|
2017-01-03T08:53:18.000Z
|
message_sender/migrations/0003_auto_20161124_1357.py
|
praekelt/seed-message-sender
|
d90ef4dc9fa248df97ca97f07569c6c70afcd1bd
|
[
"BSD-3-Clause"
] | 45 |
2016-03-16T09:32:27.000Z
|
2018-06-28T10:05:19.000Z
|
message_sender/migrations/0003_auto_20161124_1357.py
|
praekeltfoundation/seed-message-sender
|
d90ef4dc9fa248df97ca97f07569c6c70afcd1bd
|
[
"BSD-3-Clause"
] | 1 |
2016-09-28T09:32:05.000Z
|
2016-09-28T09:32:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-11-24 13:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("message_sender", "0002_auto_20161124_1150")]
operations = [
migrations.AlterField(
model_name="inbound",
name="transport_type",
field=models.CharField(blank=True, max_length=200, null=True),
)
]
| 24.947368 | 74 | 0.656118 | 317 | 0.668776 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.28692 |
058753427b8d12d1061f42dc505d9be81b5a17ea
| 15,639 |
py
|
Python
|
src/02_ppo.py
|
grzegorzwojdyga/trl
|
1921e71a7465a43dcc135d97821aa8b03bfebf8c
|
[
"Apache-2.0"
] | null | null | null |
src/02_ppo.py
|
grzegorzwojdyga/trl
|
1921e71a7465a43dcc135d97821aa8b03bfebf8c
|
[
"Apache-2.0"
] | null | null | null |
src/02_ppo.py
|
grzegorzwojdyga/trl
|
1921e71a7465a43dcc135d97821aa8b03bfebf8c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""02-ppo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GXTVkhpJyQQsUWn6tGQAWPmstw9adAzj
# PPO for transformer models
> A Pytorch implementation of Proximal Policy Optimization for transformer models.
This follows the language model approach proposed in the paper ["Fine-Tuning Language Models from Human Preferences"](
https://arxiv.org/pdf/1909.08593.pdf) and is similar to the [original implementation](https://github.com/openai/lm-human-preferences). The two main differences are that 1) the method is implemented in Pytorch and 2) it works with the `transformers` library by Hugging Face.
"""
# default_exp ppo
# export
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
import torch
import collections
import time
import random
from trl.core import (logprobs_from_logits,
whiten,
clip_by_value,
entropy_from_logits,
flatten_dict,
average_torch_dicts,
stats_to_np,
stack_dicts,
add_suffix)
"""## KL-controllers
To ensure that the learned policy does not deviate too much from the original language model, the KL divergence between the policy and a reference policy (the language model before PPO training) is used as an additional reward signal. Large KL-divergences are punished and staying close to the reference is rewarded.
Two controllers are presented in the paper: an adaptive log-space proportional controller and a fixed controller.
"""
# exports
class AdaptiveKLController:
"""
Adaptive KL controller described in the paper:
https://arxiv.org/pdf/1909.08593.pdf
"""
def __init__(self, init_kl_coef, target, horizon):
self.value = init_kl_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps):
target = self.target
proportional_error = np.clip(current / target - 1, -0.2, 0.2)
mult = 1 + proportional_error * n_steps / self.horizon
self.value *= mult
# exports
class FixedKLController:
"""Fixed KL controller."""
def __init__(self, kl_coef):
self.value = kl_coef
def update(self, current, n_steps):
pass
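"""A quick sketch of the adaptive controller's behaviour (the numbers below are illustrative assumptions, not values from the paper):

    kl_ctl = AdaptiveKLController(init_kl_coef=0.2, target=6.0, horizon=10000)
    kl_ctl.update(current=12.0, n_steps=256)  # measured KL above target -> coefficient grows
    kl_ctl.value  # 0.2 * (1 + 0.2 * 256 / 10000) ~= 0.201
"""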
# exports
class PPOTrainer:
"""
The PPO_trainer uses Proximal Policy Optimization to optimise language models.
"""
    default_params = {
        "lr": 1.41e-5,
        "adap_kl_ctrl": True,
        "init_kl_coef": 0.2,
        "target": 6,
        "horizon": 10000,
        "gamma": 1,
        "lam": 0.95,
        "cliprange": .2,
        "cliprange_value": .2,
        "vf_coef": .1,
        "batch_size": 256,
        "forward_batch_size": 16,
        "ppo_epochs": 4,
    }
def __init__(self, model, ref_model, **ppo_params):
"""
Initialize PPOTrainer.
Args:
model (torch.model): Hugging Face transformer GPT2 model with value head
            ref_model (torch.model): Hugging Face transformer GPT2 reference model used for KL penalty
ppo_params (dict or None): PPO parameters for training. Can include following keys:
'lr' (float): Adam learning rate, default: 1.41e-5
'batch_size' (int): Number of samples per optimisation step, default: 256
'forward_batch_size' (int): Number of samples forward passed through model at a time, default: 16
'ppo_epochs' (int): Number of optimisation epochs per batch of samples, default: 4
'gamma' (float)): Gamma parameter for advantage calculation, default: 1.
                'lam' (float): Lambda parameter for advantage calculation, default: 0.95
'cliprange_value' (float): Range for clipping values in loss calculation, default: 0.2
'cliprange' (float): Range for clipping in PPO policy gradient loss, default: 0.2
'vf_coef' (float): Scaling factor for value loss, default: 0.1
'adap_kl_ctrl' (bool): Use adaptive KL control, otherwise linear, default: True
'init_kl_coef' (float): Initial KL penalty coefficient (used for adaptive and linear control), default: 0.2
'target' (float): Target KL value for adaptive KL control, default: 6.0
'horizon' (float): Horizon for adaptive KL control, default: 10000
"""
        self.ppo_params = dict(self.default_params)  # copy so the class-level defaults are not mutated
        self.ppo_params.update(ppo_params)
self.ref_model = ref_model
self.model = model
self.optimizer = Adam(model.parameters(), lr=self.ppo_params['lr'])
        if self.ppo_params['adap_kl_ctrl']:
            self.kl_ctl = AdaptiveKLController(self.ppo_params['init_kl_coef'],
                                               self.ppo_params['target'],
                                               self.ppo_params['horizon'])
        else:
            self.kl_ctl = FixedKLController(self.ppo_params['init_kl_coef'])
def step(self, query, response, scores):
"""
Run a PPO optimisation step.
args:
query (torch.tensor): tensor containing the encoded queries, shape [batch_size, query_length]
response (torch.tensor): tensor containing the encoded responses, shape [batch_size, response_length]
scores (torch.tensor): tensor containing the scores, shape [batch_size]
returns:
train_stats (dict): a summary of the training statistics
"""
bs = self.ppo_params['batch_size']
timing = dict()
t0 = time.time()
gen_len = response.shape[1]
model_input = torch.cat((query, response), axis=1)
t = time.time()
logprobs, ref_logprobs, values = self.batched_forward_pass(model_input, gen_len)
timing['time/ppo/forward_pass'] = time.time()-t
t = time.time()
rewards, non_score_reward, kl_coef = self.compute_rewards(scores, logprobs, ref_logprobs)
timing['time/ppo/compute_rewards'] = time.time()-t
t = time.time()
all_stats = []
idxs = list(range(bs))
for _ in range(self.ppo_params['ppo_epochs']):
random.shuffle(idxs)
for i in range(bs):
idx = idxs[i]
train_stats = self.train_minibatch(logprobs[idx:idx+1], values[idx:idx+1],
rewards[idx:idx+1], query[idx:idx+1],
response[idx:idx+1], model_input[idx:idx+1])
all_stats.append(train_stats)
timing['time/ppo/optimize_step'] = time.time()-t
t = time.time()
train_stats = stack_dicts(all_stats)
# reshape advantages/ratios such that they are not averaged.
train_stats['policy/advantages'] = torch.flatten(train_stats['policy/advantages']).unsqueeze(0)
train_stats['policy/ratio'] = torch.flatten(train_stats['policy/ratio']).unsqueeze(0)
stats = self.record_step_stats(scores=scores, logprobs=logprobs, ref_logprobs=ref_logprobs,
non_score_reward=non_score_reward, train_stats=train_stats,
kl_coef=kl_coef)
stats = stats_to_np(stats)
timing['time/ppo/calc_stats'] = time.time()-t
self.kl_ctl.update(stats['objective/kl'], self.ppo_params['batch_size'])
timing['time/ppo/total'] = time.time()-t0
stats.update(timing)
return stats
def batched_forward_pass(self, model_input, gen_len):
"""Calculate model outputs in multiple batches."""
bs = self.ppo_params['batch_size']
fbs = self.ppo_params['forward_batch_size']
logprobs = []
ref_logprobs = []
values = []
for i in range(int(self.ppo_params['batch_size']/fbs)):
m_input = model_input[i*fbs:(i+1)*fbs]
logits, _, v = self.model(m_input)
ref_logits, _, _ = self.ref_model(m_input)
values.append(v[:, -gen_len-1:-1].detach())
logprobs.append(logprobs_from_logits(logits[:,:-1,:], m_input[:,1:])[:, -gen_len:].detach())
ref_logprobs.append(logprobs_from_logits(ref_logits[:,:-1,:], m_input[:,1:])[:, -gen_len:].detach())
return torch.cat(logprobs), torch.cat(ref_logprobs), torch.cat(values)
def train_minibatch(self, logprobs, values, rewards, query, response, model_input):
"""Train one PPO minibatch"""
loss_p, loss_v, train_stats = self.loss(logprobs, values, rewards, query, response, model_input)
loss = loss_p + loss_v
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return train_stats
def compute_rewards(self, scores, logprobs, ref_logprobs):
"""Compute per token rewards from scores and KL-penalty."""
kl = logprobs - ref_logprobs
non_score_reward = -self.kl_ctl.value * kl
rewards = non_score_reward.clone().detach()
rewards[:, -1] += scores
return rewards, non_score_reward, self.kl_ctl.value
def loss(self, old_logprobs, values, rewards, query, response, model_input):
"""Calculate policy and value losses."""
lastgaelam = 0
advantages_reversed = []
gen_len = response.shape[1]
for t in reversed(range(gen_len)):
nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
delta = rewards[:, t] + self.ppo_params['gamma'] * nextvalues - values[:, t]
lastgaelam = delta + self.ppo_params['gamma'] * self.ppo_params['lam'] * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)
returns = advantages + values
advantages = whiten(advantages)
advantages = advantages.detach()
logits, _, vpred = self.model(model_input)
logprob = logprobs_from_logits(logits[:,:-1,:], model_input[:, 1:])
        # only the generation part of the values/logprobs is needed
logprob, vpred = logprob[:, -gen_len:], vpred[:,-gen_len-1:-1]
vpredclipped = clip_by_value(vpred,
values - self.ppo_params["cliprange_value"],
values + self.ppo_params["cliprange_value"])
vf_losses1 = (vpred - returns)**2
vf_losses2 = (vpredclipped - returns)**2
vf_loss = .5 * torch.mean(torch.max(vf_losses1, vf_losses2))
vf_clipfrac = torch.mean(torch.gt(vf_losses2, vf_losses1).double())
ratio = torch.exp(logprob - old_logprobs)
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio,
1.0 - self.ppo_params['cliprange'],
1.0 + self.ppo_params['cliprange'])
pg_loss = torch.mean(torch.max(pg_losses, pg_losses2))
pg_clipfrac = torch.mean(torch.gt(pg_losses2, pg_losses).double())
loss = pg_loss + self.ppo_params['vf_coef'] * vf_loss
entropy = torch.mean(entropy_from_logits(logits))
approxkl = .5 * torch.mean((logprob - old_logprobs)**2)
policykl = torch.mean(logprob - old_logprobs)
return_mean, return_var = torch.mean(returns), torch.var(returns)
value_mean, value_var = torch.mean(values), torch.var(values)
stats = dict(
loss=dict(policy=pg_loss, value=vf_loss, total=loss),
policy=dict(entropy=entropy, approxkl=approxkl,policykl=policykl, clipfrac=pg_clipfrac,
advantages=advantages, advantages_mean=torch.mean(advantages), ratio=ratio),
returns=dict(mean=return_mean, var=return_var),
val=dict(vpred=torch.mean(vpred), error=torch.mean((vpred - returns) ** 2),
clipfrac=vf_clipfrac, mean=value_mean, var=value_var),
)
return pg_loss, self.ppo_params['vf_coef'] * vf_loss, flatten_dict(stats)
def record_step_stats(self, kl_coef, **data):
"""Record training step statistics."""
kl = data['logprobs'] - data['ref_logprobs']
mean_kl = torch.mean(torch.sum(kl, axis=-1))
mean_entropy = torch.mean(torch.sum(-data['logprobs'], axis=1))
        mean_non_score_reward = torch.mean(torch.sum(data['non_score_reward'], axis=1))
stats = {
'objective/kl': mean_kl,
'objective/kl_dist': kl,
'objective/logprobs': data['logprobs'],
'objective/ref_logprobs': data['ref_logprobs'],
'objective/kl_coef': kl_coef,
'objective/entropy': mean_entropy,
'ppo/mean_non_score_reward': mean_non_score_reward,
}
for k, v in data['train_stats'].items():
stats[f'ppo/{k}'] = torch.mean(v, axis=0)
stats['ppo/val/var_explained'] = 1 - stats['ppo/val/error'] / stats['ppo/returns/var']
return stats
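"""## Usage

A hedged sketch of a single optimisation step. `model` and `ref_model` are assumed to be GPT2 transformers with a value head (as provided elsewhere in this library); `query`, `response` and `scores` are placeholder tensors with the shapes listed in the table below, not real rollouts:

    ppo_trainer = PPOTrainer(model, ref_model, batch_size=256, forward_batch_size=16)
    stats = ppo_trainer.step(query, response, scores)
"""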
"""## Tensor shapes and contents
Debugging tensor shapes and contents usually involves inserting a lot of print statements in the code. To avoid this in the future I add a list of the tensor shapes and contents for reference. If the tensors are sliced or reshaped I list the last shape.
| Name | Shape | Content |
|-------|---------|---------|
| `query` | `[batch_size, query_length]`| contains token ids of query|
| `response`| `[batch_size, response_length]`| contains token ids of responses|
| `scores`| `[batch_size]`| rewards of each query/response pair|
| `model_input`| `[batch_size, query_length + response_length]`| combined query and response tokens|
| `m_input`|`[forward_batch_size, query_length + response_length]`| small forward batch of model_input|
| `logits` | `[forward_batch_size, query_length + response_length, vocab_size]`| logits from model outputs|
| `ref_logits`|`[forward_batch_size, query_length + response_length, vocab_size]`| logits from ref_model outputs|
| `logprobs`| `[batch_size, response_length]`| log-probabilities of response tokens |
| `ref_logprobs`| `[batch_size, response_length]`| reference log-probabilities of response tokens |
| `rewards`| `[batch_size, response_length]`| the model rewards incl. kl-score for each token|
| `non_score_reward`| `[batch_size, response_length]`| the model kl-score for each token|
## Model output alignments
Some notes on output alignments, since I spent a considerable amount of time debugging this. All model outputs are shifted by one relative to the model inputs, which means the logits and the values are always shifted one step to the left. Since there are no logits for the first input element, we drop the first input token when calculating the softmax, and we likewise shift the values by one index to the left.
## KL-divergence
One question that came up during the implementation was "Why is the KL-divergence just the difference of the log-probs? Where is the probability in front of the log term?". The answer can be found in Sergey Levine's [lecture slides](http://rll.berkeley.edu/deeprlcourse/docs/week_3_lecture_1_dynamics_learning.pdf): To calculate the KL divergence we calculate the expected value of the log term. The probability usually in front of the log-term comes from that expected value and for a set of trajectories we can simply take the mean over the sampled trajectories.
"""
| 47.390909 | 564 | 0.632585 | 11,416 | 0.72997 | 0 | 0 | 0 | 0 | 0 | 0 | 7,162 | 0.457958 |
0587d07321592ddb102cc4ed98640454fd0d67f7
| 4,589 |
py
|
Python
|
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | 3 |
2020-12-19T13:48:06.000Z
|
2021-08-12T18:36:33.000Z
|
"""Rock Paper Scisssors game using OOP"""
import random
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove
class RockPaperScissors:
"""initializing the 'global' atributtes"""
def __init__(self):
self.defeat = {"scissors": "rock", "paper" : "scissors", "rock" : "paper"}
self.choices = ["rock", "paper", "scissors"]
self.score = 0
self.name = input("Enter your name: ")
def file(self):
"""method keeping track of players rating in 'rating.txt' file"""
file = open("rating.txt", "r+", encoding="utf-8")
        for line in file:
            line1 = line.rstrip()
            if self.name == line1.split()[0]:
                score = line1.split()[1]
                self.score = int(score)
                self.play()
                # append the updated line; rewrite_file() deduplicates the file later
                print(line.replace(score, str(self.score)), file=file, flush=True)
                file.close()
                break
        else:
            self.play()
            print(self.name, self.score, sep=" ", file=file, flush=True)
            file.close()
def play(self):
"""method is checking word imputed by user against the initial dict of words,
and increase rating if user wins,or is a draw"""
print(f"Hello, {self.name}")
self.rewrite_options()
print("Okay, let's start")
while True:
user_input = input("Enter your choice: ")
if user_input == "!rating":
print(f"Your rating: {self.score}")
continue
elif user_input == "!exit":
print("Bye!")
break
else:
choice = random.choice(self.choices)
if user_input not in self.choices:
print("Invalid input")
elif user_input == choice:
self.score += 50
print(f"There is a draw ({choice})")
elif user_input in self.defeat[choice]:
self.score += 100
print(f"Well done. The computer chose {choice} and failed")
else:
print(f"Sorry, but the computer chose {choice}")
def rewrite_file(self):
"""method updating rating of all players by rewriting 'rating.txt' file"""
names = []
dict_ = {}
fake_f = "rating.txt"
abs_path = "C:/Users/dandei/Desktop/jetBrain_projects/rock_paper_scissors/rating.txt" #change this with your path
fake_f, abs_path = mkstemp()
with fdopen(fake_f, "w") as new_file:
with open("rating.txt", "r+", encoding="utf-8") as file:
content = file.read()
content = content.split("\n")
for element in content:
if len(element) > 1:
element = element.split()
names.append(element)
dict_ = dict(names)
for key, value in dict_.items():
print(key, value, sep=" ", file=new_file)
copymode("rating.txt", abs_path)
remove("rating.txt")
move(abs_path, "rating.txt")
def rewrite_options(self):
"""method let's user choose between playing the classic game or
palying the game with more options. Changes the initial dict of words as user
inputs more options"""
choice = input("Enter your game options: ")
choices = choice.split(",")
defeat_by = {}
new_list = []
if choice == "":
return None
else:
self.choices = choices
for i in range(len(choices)):
new_list = choices[i + 1:] + choices[:i]
                # each option is beaten by the first half of the options that cyclically follow it
                defeat_by[choices[i]] = new_list[:(len(new_list)) // 2]
self.defeat = defeat_by
# If rating.txt does not exist, it gets created here
fill = open("rating.txt", "a", encoding="utf-8")
fill.close()
#creating instance of the RockPaperScissors class
rps = RockPaperScissors()
rps.file()
rps.rewrite_file()
| 33.014388 | 123 | 0.504249 | 4,187 | 0.912399 | 0 | 0 | 0 | 0 | 0 | 0 | 1,253 | 0.273044 |
0588017972ca3ca8aebe2412eda69531f658e740
| 807 |
py
|
Python
|
jarvis/accounts/tests/factories.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | 1 |
2019-01-03T06:10:04.000Z
|
2019-01-03T06:10:04.000Z
|
jarvis/accounts/tests/factories.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | 1 |
2021-03-31T19:11:52.000Z
|
2021-03-31T19:11:52.000Z
|
jarvis/accounts/tests/factories.py
|
Anubhav722/blahblah
|
160698e06a02e671ac40de3113cd37d642e72e96
|
[
"MIT"
] | null | null | null |
from faker import Faker
from ..models import Client, UserProfile
from django.contrib.auth import get_user_model
from factory import LazyAttribute, SubFactory, django
fake = Faker()
User = get_user_model()
class ClientFactory(django.DjangoModelFactory):
class Meta:
model = Client
    # LazyAttribute defers the faker calls so each instance gets fresh values
    key = LazyAttribute(lambda o: fake.sha1())
    secret = LazyAttribute(lambda o: fake.sha256())
    organization = LazyAttribute(lambda o: fake.company())
class UserFactory(django.DjangoModelFactory):
class Meta:
model = User
    username = LazyAttribute(lambda o: fake.user_name())
    email = LazyAttribute(lambda o: fake.email())
class UserProfileFactory(django.DjangoModelFactory):
class Meta:
model = UserProfile
django_get_or_create = ('user',)
client = SubFactory(ClientFactory)
user = SubFactory(UserFactory)
    limit = LazyAttribute(lambda o: fake.numerify())
    label = LazyAttribute(lambda o: fake.name())
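# Hedged usage sketch (assumes the standard factory_boy create strategy and a
# configured Django test database):
#
#   profile = UserProfileFactory()   # builds Client and User via SubFactory
#   assert profile.client.organization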
| 21.810811 | 70 | 0.700124 | 574 | 0.711276 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.007435 |
0588430e94f2e77e31265668a8e628ff493b0db0
| 24 |
py
|
Python
|
tests/components/devcon/__init__.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | null | null | null |
tests/components/devcon/__init__.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | null | null | null |
tests/components/devcon/__init__.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | 1 |
2019-04-24T14:10:08.000Z
|
2019-04-24T14:10:08.000Z
|
"""Tests for Devcon."""
| 12 | 23 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.958333 |
05884cb8cc1e8c53f7f9b4339d31feb82c92a4b6
| 98 |
py
|
Python
|
Code coach problems/Easy/Python/Isogram_Detector.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | 1 |
2020-07-27T07:32:57.000Z
|
2020-07-27T07:32:57.000Z
|
Code coach problems/Easy/Python/Isogram_Detector.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | null | null | null |
Code coach problems/Easy/Python/Isogram_Detector.py
|
Djivs/sololearn-code-solutions
|
7727dd97f79863a88841548770481f6f2abdc7bf
|
[
"MIT"
] | 1 |
2020-11-07T12:45:21.000Z
|
2020-11-07T12:45:21.000Z
|
a = input()
i = 0
while i != len(a):
    if a[i] in a[i + 1:]:  # current character repeats later in the string
        break
    i += 1  # advance; without this the loop never terminates
print(str(i == len(a)).lower())  # "true" if no character repeats (isogram)
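# Example (hedged): "machine" has no repeated letters -> prints "true";
# "hello" repeats 'l' -> prints "false".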
| 14 | 31 | 0.510204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0589b9d3ea2a64dcded6b8ab04bba1a44e732a41
| 2,813 |
py
|
Python
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 8 |
2020-05-12T18:08:52.000Z
|
2021-12-27T06:11:00.000Z
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 4 |
2020-05-13T16:07:49.000Z
|
2020-06-30T18:47:14.000Z
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 3 |
2020-05-16T19:57:57.000Z
|
2020-11-01T08:43:31.000Z
|
# -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Schemas for Engine Results component"""
from schema import And, Or, Optional, Schema
IOCv2SEVSchema = Schema(
{
"id": And(str, len),
"match_type": And(str, lambda type: type in ["query", "equality", "regex"]),
"values": And([str], len),
Optional("field"): And(str, len),
Optional("link"): And(str, len),
"severity": And(int, lambda n: n > 0 and n < 11) # Needs stripped before sent to CBC
}
)
IOCv2Schema = Schema(
{
"id": And(str, len),
"match_type": And(str, lambda type: type in ["query", "equality", "regex"]),
"values": And([str], len),
Optional("field"): And(str, len),
Optional("link"): And(str, len)
}
)
ReportSchema = Schema(
{
"id": And(str, len),
"timestamp": And(int, lambda n: n > 0),
"title": And(str, len),
"description": And(str, len),
"severity": And(int, lambda n: n > 0 and n < 11),
Optional("link"): str,
Optional("tags"): [str],
"iocs_v2": [IOCv2Schema],
Optional("visibility"): str
}
)
EngineResponseSchema = Schema(
{
"iocs": [IOCv2SEVSchema],
"engine_name": And(str, len),
"binary_hash": And(str, lambda n: len(n) == 64),
"success": bool
}
)
BinaryMetadataSchema = Schema(
{
"sha256": And(str, lambda n: len(n) == 64),
"url": And(str, len),
"architecture": [str],
"available_file_size": Or(int, None),
"charset_id": Or(int, None),
"comments": Or(str, None),
"company_name": Or(str, None),
"copyright": Or(str, None),
"file_available": bool,
"file_description": Or(str, None),
"file_size": Or(int, None),
"file_version": Or(str, None),
"internal_name": Or(str, None),
"lang_id": Or(int, None),
"md5": And(str, lambda n: len(n) == 32),
"original_filename": Or(str, None),
"os_type": Or(str, None),
"private_build": Or(str, None),
"product_description": Or(str, None),
"product_name": Or(str, None),
"product_version": Or(str, None),
"special_build": Or(str, None),
"trademark": Or(str, None)
}
)
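# A minimal usage sketch (the payload below is a made-up example, not real engine output):
if __name__ == "__main__":
    example_report = {
        "id": "report-1",
        "timestamp": 1588888888,
        "title": "Example report",
        "description": "Illustrative payload for schema validation",
        "severity": 5,
        "iocs_v2": [{"id": "ioc-1", "match_type": "equality", "values": ["a" * 64]}],
    }
    print(ReportSchema.validate(example_report))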
| 31.606742 | 93 | 0.542126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.422325 |