blob_id (stringlengths 40 to 40) | directory_id (stringlengths 40 to 40) | path (stringlengths 3 to 616) | content_id (stringlengths 40 to 40) | detected_licenses (sequencelengths 0 to 112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5 to 115) | snapshot_id (stringlengths 40 to 40) | revision_id (stringlengths 40 to 40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 to 10.2M) | authors (sequencelengths 1 to 1) | author_id (stringlengths 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a19af9a0fadedcb4bd7e45597b3e62571e51821 | 3c6b0521eb788dc5e54e46370373e37eab4a164b | /holistic_eval/roberta_mnli/examples/scripts/scripts/run_experiment.py | 9d5f149859a049824f329d3eeca723f861738f66 | [
"Apache-2.0"
] | permissive | y12uc231/DialEvalMetrics | 7402f883390b94854f5d5ae142f700a697d7a21c | f27d717cfb02b08ffd774e60faa6b319a766ae77 | refs/heads/main | 2023-09-02T21:56:07.232363 | 2021-11-08T21:25:24 | 2021-11-08T21:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | import os
import smtplib
from email.mime.text import MIMEText
mail_host = 'smtp.163.com'
mail_user = 'aigu3525'
mail_pass = 'WOaibaobao'
sender = '[email protected]'
receivers = ['[email protected]']
def run_training(ex_title, type, paras_dict, node, GPU, logger=None , print_=False):
print('_'*100)
if type == 'MNLI': train_file = 'run_glue.py'
opt_dict = paras_dict
try:
os.mkdir('scripts/logs/' + type)
except:
x=1
message = MIMEText('Start training experiment {}'.format(str(ex_title)), 'plain', 'utf-8')
message['Subject'] = 'Experiment {}'.format(str(ex_title))
message['From'] = sender
message['To'] = receivers[0]
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(
sender, receivers, message.as_string())
smtpObj.quit()
print('success')
except:
print('error') # print the error
if True:
print_file_train = 'scripts/logs/'+ type + '/' + ex_title+ '.print'
keys = list(opt_dict)
values = [opt_dict[key] for key in keys]
paras = ''
for i in range(len(keys)):
if values[i] == 'False':
continue
paras += ' --'
paras += keys[i]
if values[i] != 'True':
paras += '='
paras += str(values[i])
if True:
commend_list_train = []
# print(paras)
commend_list_train.append('ssh node'+node + ' \"')
commend_list_train.append('cd /root/liuyx/transformers/examples;')
commend_list_train.append('CUDA_VISIBLE_DEVICES=' + str(GPU) + ' /root/anaconda3/envs/transformer/bin/python ')
commend_list_train.append(train_file + paras +' 2>&1 | tee '+print_file_train + '')
commend_list_train.append('\"')
print(commend_list_train)
pred_return = os.system(''.join(commend_list_train))
message = MIMEText('Experiment {}, training end'.format(str(ex_title)), 'plain', 'utf-8')
message['Subject'] = 'Experiment {}'.format(str(ex_title))
message['From'] = sender
message['To'] = receivers[0]
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(
sender, receivers, message.as_string())
smtpObj.quit()
print('success')
except:
print('error') # print the error | [
"[email protected]"
] | |
71e14762493667ad2bae0d74ce90bd364e9333a7 | 780183a7842ad548703f3a62be0d1413fc901254 | /frappe/frappe/sessions.py | f2c5efe8e3fb2c2bf63699f3729c398fb244d934 | [
"MIT"
] | permissive | Shreyasnaik01/Library-Management | 9ab49281fd331d73c85c0d6f15797be97ecdbfc4 | 8bda4131309897c22e2fcbc54b402aded35a5523 | refs/heads/master | 2023-08-29T02:36:15.972349 | 2021-10-19T13:43:43 | 2021-10-19T13:43:43 | 418,891,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,260 | py | # Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint, cstr
import frappe.model.meta
import frappe.defaults
import frappe.translate
import redis
from six.moves.urllib.parse import unquote
from six import text_type
from frappe.cache_manager import clear_user_cache
@frappe.whitelist(allow_guest=True)
def clear(user=None):
frappe.local.session_obj.update(force=True)
frappe.local.db.commit()
clear_user_cache(frappe.session.user)
frappe.response['message'] = _("Cache Cleared")
def clear_sessions(user=None, keep_current=False, device=None, force=False):
'''Clear other sessions of the current user. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
:param force: triggered by the user (default false)
'''
reason = "Logged In From Another Session"
if force:
reason = "Force Logged out by the user"
for sid in get_sessions_to_clear(user, keep_current, device):
delete_session(sid, reason=reason)
def get_sessions_to_clear(user=None, keep_current=False, device=None):
'''Returns sessions of the current user. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
'''
if not user:
user = frappe.session.user
if not device:
device = ("desktop", "mobile")
if not isinstance(device, (tuple, list)):
device = (device,)
offset = 0
if user == frappe.session.user:
simultaneous_sessions = frappe.db.get_value('User', user, 'simultaneous_sessions') or 1
offset = simultaneous_sessions - 1
condition = ''
if keep_current:
condition = ' AND sid != {0}'.format(frappe.db.escape(frappe.session.sid))
return frappe.db.sql_list("""
SELECT `sid` FROM `tabSessions`
WHERE `tabSessions`.user=%(user)s
AND device in %(device)s
{condition}
ORDER BY `lastupdate` DESC
LIMIT 100 OFFSET {offset}""".format(condition=condition, offset=offset),
{"user": user, "device": device})
def delete_session(sid=None, user=None, reason="Session Expired"):
from frappe.core.doctype.activity_log.feed import logout_feed
frappe.cache().hdel("session", sid)
frappe.cache().hdel("last_db_session_update", sid)
if sid and not user:
user_details = frappe.db.sql("""select user from tabSessions where sid=%s""", sid, as_dict=True)
if user_details: user = user_details[0].get("user")
logout_feed(user, reason)
frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
frappe.db.commit()
def clear_all_sessions(reason=None):
"""This effectively logs out all users"""
frappe.only_for("Administrator")
if not reason: reason = "Deleted All Active Session"
for sid in frappe.db.sql_list("select sid from `tabSessions`"):
delete_session(sid, reason=reason)
def get_expired_sessions():
'''Returns list of expired sessions'''
expired = []
for device in ("desktop", "mobile"):
expired += frappe.db.sql_list("""SELECT `sid`
FROM `tabSessions`
WHERE (NOW() - `lastupdate`) > %s
AND device = %s""", (get_expiry_period_for_query(device), device))
return expired
def clear_expired_sessions():
"""This function is meant to be called from scheduler"""
for sid in get_expired_sessions():
delete_session(sid, reason="Session Expired")
def get():
"""get session boot info"""
from frappe.boot import get_bootinfo, get_unseen_notes
from frappe.utils.change_log import get_change_log
bootinfo = None
if not getattr(frappe.conf,'disable_session_cache', None):
# check if cache exists
bootinfo = frappe.cache().hget("bootinfo", frappe.session.user)
if bootinfo:
bootinfo['from_cache'] = 1
bootinfo["user"]["recent"] = json.dumps(\
frappe.cache().hget("user_recent", frappe.session.user))
if not bootinfo:
# if not create it
bootinfo = get_bootinfo()
frappe.cache().hset("bootinfo", frappe.session.user, bootinfo)
try:
frappe.cache().ping()
except redis.exceptions.ConnectionError:
message = _("Redis cache server not running. Please contact Administrator / Tech support")
if 'messages' in bootinfo:
bootinfo['messages'].append(message)
else:
bootinfo['messages'] = [message]
# check only when clear cache is done, and don't cache this
if frappe.local.request:
bootinfo["change_log"] = get_change_log()
bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
if not bootinfo["metadata_version"]:
bootinfo["metadata_version"] = frappe.reset_metadata_version()
bootinfo.notes = get_unseen_notes()
for hook in frappe.get_hooks("extend_bootinfo"):
frappe.get_attr(hook)(bootinfo=bootinfo)
bootinfo["lang"] = frappe.translate.get_user_lang()
bootinfo["disable_async"] = frappe.conf.disable_async
bootinfo["setup_complete"] = cint(frappe.db.get_single_value('System Settings', 'setup_complete'))
bootinfo["is_first_startup"] = cint(frappe.db.get_single_value('System Settings', 'is_first_startup'))
return bootinfo
def get_csrf_token():
if not frappe.local.session.data.csrf_token:
generate_csrf_token()
return frappe.local.session.data.csrf_token
def generate_csrf_token():
frappe.local.session.data.csrf_token = frappe.generate_hash()
frappe.local.session_obj.update(force=True)
class Session:
def __init__(self, user, resume=False, full_name=None, user_type=None):
self.sid = cstr(frappe.form_dict.get('sid') or
unquote(frappe.request.cookies.get('sid', 'Guest')))
self.user = user
self.device = frappe.form_dict.get("device") or "desktop"
self.user_type = user_type
self.full_name = full_name
self.data = frappe._dict({'data': frappe._dict({})})
self.time_diff = None
# set local session
frappe.local.session = self.data
if resume:
self.resume()
else:
if self.user:
self.start()
def start(self):
"""start a new session"""
# generate sid
if self.user=='Guest':
sid = 'Guest'
else:
sid = frappe.generate_hash()
self.data.user = self.user
self.data.sid = sid
self.data.data.user = self.user
self.data.data.session_ip = frappe.local.request_ip
if self.user != "Guest":
self.data.data.update({
"last_updated": frappe.utils.now(),
"session_expiry": get_expiry_period(self.device),
"full_name": self.full_name,
"user_type": self.user_type,
"device": self.device,
"session_country": get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None,
})
# insert session
if self.user!="Guest":
self.insert_session_record()
# update user
user = frappe.get_doc("User", self.data['user'])
frappe.db.sql("""UPDATE `tabUser`
SET
last_login = %(now)s,
last_ip = %(ip)s,
last_active = %(now)s
WHERE name=%(name)s""", {
'now': frappe.utils.now(),
'ip': frappe.local.request_ip,
'name': self.data['user']
})
user.run_notifications("before_change")
user.run_notifications("on_update")
frappe.db.commit()
def insert_session_record(self):
frappe.db.sql("""insert into `tabSessions`
(`sessiondata`, `user`, `lastupdate`, `sid`, `status`, `device`)
values (%s , %s, NOW(), %s, 'Active', %s)""",
(str(self.data['data']), self.data['user'], self.data['sid'], self.device))
# also add to memcache
frappe.cache().hset("session", self.data.sid, self.data)
def resume(self):
"""non-login request: load a session"""
import frappe
from frappe.auth import validate_ip_address
data = self.get_session_record()
if data:
self.data.update({'data': data, 'user':data.user, 'sid': self.sid})
self.user = data.user
validate_ip_address(self.user)
self.device = data.device
else:
self.start_as_guest()
if self.sid != "Guest":
frappe.local.user_lang = frappe.translate.get_user_lang(self.data.user)
frappe.local.lang = frappe.local.user_lang
def get_session_record(self):
"""get session record, or return the standard Guest Record"""
from frappe.auth import clear_cookies
r = self.get_session_data()
if not r:
frappe.response["session_expired"] = 1
clear_cookies()
self.sid = "Guest"
r = self.get_session_data()
return r
def get_session_data(self):
if self.sid=="Guest":
return frappe._dict({"user":"Guest"})
data = self.get_session_data_from_cache()
if not data:
data = self.get_session_data_from_db()
return data
def get_session_data_from_cache(self):
data = frappe.cache().hget("session", self.sid)
if data:
data = frappe._dict(data)
session_data = data.get("data", {})
# set user for correct timezone
self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
session_data.get("last_updated"))
expiry = get_expiry_in_seconds(session_data.get("session_expiry"))
if self.time_diff > expiry:
self._delete_session()
data = None
return data and data.data
def get_session_data_from_db(self):
self.device = frappe.db.sql('SELECT `device` FROM `tabSessions` WHERE `sid`=%s', self.sid)
self.device = self.device and self.device[0][0] or 'desktop'
rec = frappe.db.sql("""
SELECT `user`, `sessiondata`
FROM `tabSessions` WHERE `sid`=%s AND
(NOW() - lastupdate) < %s
""", (self.sid, get_expiry_period_for_query(self.device)))
if rec:
data = frappe._dict(eval(rec and rec[0][1] or '{}'))
data.user = rec[0][0]
else:
self._delete_session()
data = None
return data
def _delete_session(self):
delete_session(self.sid, reason="Session Expired")
def start_as_guest(self):
"""all guests share the same 'Guest' session"""
self.user = "Guest"
self.start()
def update(self, force=False):
"""extend session expiry"""
if (frappe.session['user'] == "Guest" or frappe.form_dict.cmd=="logout"):
return
now = frappe.utils.now()
self.data['data']['last_updated'] = now
self.data['data']['lang'] = str(frappe.lang)
# update session in db
last_updated = frappe.cache().hget("last_db_session_update", self.sid)
time_diff = frappe.utils.time_diff_in_seconds(now, last_updated) if last_updated else None
# database persistence is secondary, don't update it too often
updated_in_db = False
if force or (time_diff==None) or (time_diff > 600):
# update sessions table
frappe.db.sql("""update `tabSessions` set sessiondata=%s,
lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
self.data['sid']))
# update last active in user table
frappe.db.sql("""update `tabUser` set last_active=%(now)s where name=%(name)s""", {
"now": now,
"name": frappe.session.user
})
frappe.db.commit()
frappe.cache().hset("last_db_session_update", self.sid, now)
updated_in_db = True
# set in memcache
frappe.cache().hset("session", self.sid, self.data)
return updated_in_db
def get_expiry_period_for_query(device=None):
if frappe.db.db_type == 'postgres':
return get_expiry_period(device)
else:
return get_expiry_in_seconds(device=device)
def get_expiry_in_seconds(expiry=None, device=None):
if not expiry:
expiry = get_expiry_period(device)
parts = expiry.split(":")
return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])
def get_expiry_period(device="desktop"):
if device=="mobile":
key = "session_expiry_mobile"
default = "720:00:00"
else:
key = "session_expiry"
default = "06:00:00"
exp_sec = frappe.defaults.get_global_default(key) or default
# incase seconds is missing
if len(exp_sec.split(':')) == 2:
exp_sec = exp_sec + ':00'
return exp_sec
def get_geo_from_ip(ip_addr):
try:
from geolite2 import geolite2
with geolite2 as f:
reader = f.reader()
data = reader.get(ip_addr)
return frappe._dict(data)
except ImportError:
return
except ValueError:
return
except TypeError:
return
def get_geo_ip_country(ip_addr):
match = get_geo_from_ip(ip_addr)
if match:
return match.country
| [
"[email protected]"
] | |
53b6f743c52e16229449c9f99dc18438957c017f | 4290daae480aabfc35c85374a468085a6fa1a1ac | /ctt-server/openapi_server/test/test_result_controller.py | 652d6ffdfa52d2b04f99be41db1f222c6a23aec6 | [
"Apache-2.0"
] | permissive | pjakovits/radon-ctt | 01c8bc760372f6887798722c291674971b10a86d | 8c73e05a83ef66bd6e9dba6608d2bee089df7e86 | refs/heads/master | 2021-05-20T19:13:35.919410 | 2021-03-28T17:07:20 | 2021-03-28T17:07:20 | 252,386,523 | 0 | 0 | Apache-2.0 | 2020-04-02T07:34:24 | 2020-04-02T07:34:23 | null | UTF-8 | Python | false | false | 2,596 | py | # coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.result import Result # noqa: E501
from openapi_server.test import BaseTestCase
class TestResultController(BaseTestCase):
"""ResultController integration test stubs"""
def test_create_result(self):
"""Test case for create_result
Creates new result
"""
body = POSTResult()
response = self.client.open(
'/result',
method='POST',
data=json.dumps(body),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_delete_result_by_uuid(self):
"""Test case for delete_result_by_uuid
Delete a result
"""
response = self.client.open(
'/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
method='DELETE')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_download_result_by_uuid(self):
"""Test case for download_result_by_uuid
Downloads the generated results
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result/{result_uuid}/download'.format(result_uuid='result_uuid_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_result_by_uuid(self):
"""Test case for get_result_by_uuid
Retrieve a result
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_results(self):
"""Test case for get_results
Get all results
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result',
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a8612ac222aae209f9c985d771c92a8900557d7e | f76e8b03862264731be92bc16e4ced7b7e078b0a | /instagram/urls.py | 64a8b30ddbdcb0ea06908f107e67a02fbccf2f17 | [
"MIT"
] | permissive | bryomajor/instalite | b8d400d6b1ecc337e5008ddd6738e8df4653df05 | c3854b30235960fae89682c55c88637fb8fb05ad | refs/heads/master | 2022-12-11T15:06:32.222163 | 2021-04-07T10:13:46 | 2021-04-07T10:13:46 | 221,914,550 | 0 | 1 | MIT | 2021-06-10T22:16:42 | 2019-11-15T11:54:53 | Python | UTF-8 | Python | false | false | 1,134 | py | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^home/', views.timeline, name = 'index'),
url(r'^$', views.home, name = 'home'),
url(r'^signup/$', views.signup, name = 'signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^user/(?P<username>\w+)', views.profile, name='profile'),
url(r'^accounts/edit/', views.edit_profile, name='edit_profile'),
url(r'^upload/$', views.upload_image, name='upload_image'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^like/(?P<image_id>\d+)', views.like, name='like'),
url(r'^is_liked/', views.is_liked, name = 'is_liked')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"[email protected]"
] | |
2c5ad861fcce9b4203e5ff1c9b6fbdabf1e10047 | 7c0ac74b1215a5e53698924b69b89221fec0cfd6 | /torch_geometric/utils/matmul.py | 8e8f4bb6aaea1430f1a94da1c9239473e4b18be1 | [
"MIT"
] | permissive | ZackPashkin/pytorch_geometric | b30c1a220f3f5f593ec4ac12b696f2cac1ae4e0a | 3663a96c8e649af46c29a32beb03f49cc97f5b86 | refs/heads/master | 2020-03-20T09:33:51.497347 | 2019-03-19T05:07:30 | 2019-03-19T05:07:30 | 137,341,025 | 0 | 0 | null | 2018-06-14T10:05:30 | 2018-06-14T10:05:29 | null | UTF-8 | Python | false | false | 415 | py | from torch_scatter import scatter_add
def matmul(index, value, tensor):
tensor = tensor if tensor.dim() > 1 else tensor.unsqueeze(-1)
assert (value is None or value.dim() == 1) and tensor.dim() == 2
row, col = index
out_col = tensor[col]
out_col = out_col if value is None else out_col * value.unsqueeze(-1)
out = scatter_add(out_col, row, dim=0, dim_size=tensor.size(0))
return out
| [
"[email protected]"
] | |
54dfda9b7aa9d7fa1d61f506b4266155a7266e1a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/294/80466/submittedfiles/testes.py | 9d7f6042a7d4b71cbfecc32848c07dbb8ef2064f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | # -*- coding: utf-8 -*-
#START HERE BELOW
nome= str(input('Qual o seu nome? '))
print('Olá' +nome+ 'seja bem vinda!') | [
"[email protected]"
] | |
5ffb982e62eb751b952318b60fb800f712713ca9 | f3f732881b813dd5d6e1239618f5d4d6bb394db7 | /160.intersectionLinkedList.py | 6fc29a7e0f2285d1531677f1a175803bb8ec1e0d | [] | no_license | umnstao/leetcodeOJ | 45917528abb693fa23678356497267e4ce571a4f | f7cb7cfa6e1f04efd741c2456ad930db48101573 | refs/heads/master | 2021-01-21T14:57:22.257064 | 2017-11-22T22:57:48 | 2017-11-22T22:57:48 | 95,362,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
lenA = 0
lenB = 0
curA = headA
while curA:
curA = curA.next
lenA += 1
curB = headB
while curB:
curB = curB.next
lenB += 1
curA = headA
curB = headB
if lenA > lenB:
k = lenA - lenB
while k > 0:
curA = curA.next
k = k - 1
elif lenA < lenB:
k = lenB - lenA
while k > 0:
curB = curB.next
k = k - 1
while curA:
if curA == curB:
return curA
curA = curA.next
curB = curB.next
return None
| [
"[email protected]"
] | |
e3927f2bbe1447e57c5f9862e6bdbbed472c3f4d | ef72a7df3c39c215dd90ac5e72b164eb9d7da892 | /rpg/monsters/imp.py | b23f023017e8acdf3b1f1eebfbf51bb93c44f41b | [] | no_license | thebmo/messing_around | d49a87fc1ff722428ea67bc710ca99ad287098bd | 4cb12e0b224cf7d1f93cb4ae6ff7603619fb7aa9 | refs/heads/master | 2021-01-13T02:18:50.799898 | 2015-04-08T01:12:41 | 2015-04-08T01:12:41 | 28,570,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | """
Imp monster sub-class of parents Monster and NPC.
"""
from monster import Monster
from npc import NPC
class Imp(Monster, NPC):
NAME = 'Imp'
STATS = {
'STR': 5,
'AGI': 3,
'INT': 1,
'CHA': 0,
'LCK': 0,
'max_hp': 6,
'max_ap': 1,
'level': 2,
'exp': 6,
'gold': 8,
}
| [
"[email protected]"
] | |
10d1bf00d434f01ca2cb8508777a5a075caff03a | fb51a82f51ba4e5f5160358822a6154cc5c9466b | /mrc_utils/json2db/squad_mysql_nia.py | 3179dddc87fae5ee65ad135fee5860c4450a6d07 | [] | no_license | yeongsunpark/good | cc26cda2106117d66ddc3bd89099dcd9d3c952eb | 3e5d510548d2d5e63174490344aa14539d6e8785 | refs/heads/master | 2020-05-02T03:30:38.354551 | 2019-12-04T01:18:33 | 2019-12-04T01:18:33 | 177,730,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,996 | py | import json
import logging
import os, sys
import random
import math
import time
import re
import concurrent.futures
import string
from multiprocessing import Pool
import pymysql
sys.path.append(os.path.abspath('..'))
import custom_logger
from morp_analyze_my import NLPAnalyzer
def return_myself(token):
return token
logger = logging.getLogger('root')
logger.setLevel("INFO")
logger.addHandler(custom_logger.MyHandler())
logger.info("Finish setting logger")
class SquadDb():
def __init__(self):
self.maximum = None # for squad2db, db2squad
self.is_divide = False # for squad2db, db2squad
self.is_dp = False
self.db_cnf_dict = {}
# self.context_table = "(%s, %s, %s, %s, %s, %s)"
self.context_table = "(%s, %s, %s, %s, %s)"
self.qna_table = "(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
self.con = None
self.cur = None
self.dp_end = "_dp" if self.is_dp else ""
self.context_ori = ""
self.nlp_analyze = NLPAnalyzer()
self.processed_ctx_list = list()
def easy_mysql(self, cfg_dict, encoding='utf8', autocommit=False):
self.con = pymysql.connect(host=cfg_dict['host'], user=cfg_dict['usr'],
passwd=cfg_dict['pwd'], db=cfg_dict['db'], charset=encoding)
self.cur = self.con.cursor()
if autocommit is True:
self.con.autocommit(True)
def connect_db(self, table_name):
try: # try to connect to project db
cfg_dict = dict(host='localhost', usr= 'root', pwd='data~secret!', db=table_name)
#self.easy_mysql(cfg_dict, encoding=self.db_cnf_dict['encoding'], autocommit=True) # turn-on autocummit, be careful!
self.easy_mysql(cfg_dict, encoding='utf8', autocommit=True)
self.cur.execute("SET NAMES utf8")
except Exception as e:
logger.critical(e)
logger.info("Finish connecting to database...")
def insert_mysql(self, sql, varTuple):
try:
self.cur.execute(sql, varTuple)
logger.debug("Data inserted")
except pymysql.Error as e:
logger.critical(e)
logger.critical(sql%varTuple)
exit()
def insert_data(self, table, value_part, var_tuple, morph_end):
if "context" in table:
#sql = "INSERT INTO {}(id, season, data_type, title, context{}, c_datetime) VALUES {}".\
sql = "INSERT INTO {}(id, season, data_type, title, context{}) VALUES {}".\
format(table, morph_end, value_part)
else:
sql = "INSERT INTO {}(c_id, q_id, question{}, answer_start{}, answer_end{}, answer{}, cate1, cate2, cate3) VALUES {}".\
format(table, morph_end, morph_end, morph_end, morph_end, value_part)
self.insert_mysql(sql, var_tuple)
def fetch_text(self):
#sql = "SELECT c.id, c.title, c.context, q.question, q.answer, c.c_datetime " \
sql = "SELECT c.id, c.title, c.context, q.question, q.answer " \
"FROM all_context c, all_qna q WHERE q.c_id=c.id AND q.q_id = '{}';"
final_list = list()
with open(os.path.join(self.data_root_dir, self.correction_data), "r") as f:
for line in f:
item = line.strip().split("\t")
self.cur.execute(sql.format(item[0]))
row = self.cur.fetchone()
new_list = [str(x) for x in item + list(row)]
final_list.append("\t".join(new_list))
with open(os.path.join(self.data_root_dir,
"{}_original.tsv".format(self.correction_data.split(".")[0])),
"w") as f2:
f2.write("\n".join(final_list))
def squad2db(self, json_location, start_id, season, data_type, table_name):
self.connect_db(table_name)
with open(json_location) as f:
data = json.load(f)
data = data['data']
if start_id is None:
start_id = 1
for d in data:
try:
logger.info(d['title'])
title = d['title']
# c_datetime = d['c_datetime']
except KeyError:
continue
for para in d['paragraphs']:
if self.is_divide:
if random.random() >= self.test_ratio:
data_type = "train"
else:
data_type = "dev"
q_context = str(para['context'])
try:
self.context_ori = str(para['context_ori'])
except KeyError:
if self.context_ori == "":
exit("There's no context_ori")
# var_tuple_ctx = (start_id, season, data_type, title.strip(), q_context.strip(), c_datetime)
var_tuple_ctx = (start_id, season, data_type, title.strip(), q_context.strip())
#var_tuple_ctx_ori = (start_id, season, data_type, title.strip(), self.context_ori.strip(),c_datetime)
var_tuple_ctx_ori = (start_id, season, data_type, title.strip(), self.context_ori.strip())
self.insert_data(table="all_context", value_part=self.context_table, var_tuple=var_tuple_ctx, morph_end="")
self.insert_data(table="all_context_ori", value_part=self.context_table, var_tuple=var_tuple_ctx_ori, morph_end="")
if self.is_divide:
self.insert_data(table="{}_context".format(data_type), value_part=self.context_table, var_tuple=var_tuple_ctx, morph_end="")
self.insert_data(table="{}_context_ori".format(data_type), value_part=self.context_table, var_tuple=var_tuple_ctx_ori, morph_end="")
for qa in para['qas']:
q = str(qa['question'])
q_id = qa['id']
# cates = qa['category'].split("-")
for a in qa['answers']:
a_start = a['answer_start'] # int
try:
a_end = a['answer_end'] # int
except KeyError:
a_end = -1
text = a['text'] # answer text
var_tuple_qa = (start_id, q_id, q.strip().strip("?").strip(), a_start, a_end, text.strip(),
'', '', '')
self.insert_data(table="all_qna", value_part=self.qna_table, var_tuple=var_tuple_qa, morph_end="")
start_id += 1
logger.debug("num of para: %i" % len(d['paragraphs']))
def update_devset(self):
dev_id_list = list()
header = True
with open(self.test_id_file) as f:
for line in f:
if header:
header = False
continue
# lv.1 lv.2 category q_id question answer
item = line.strip().split("\t")
dev_id_list.append(item[3])
logger.info("Len of dev_id_list: {}".format(len(dev_id_list)))
fetch_sql_q = "SELECT c_id, q_id FROM all_qna WHERE q_id IN %s;"
logger.debug(tuple(dev_id_list))
self.cur.execute(fetch_sql_q, (tuple(dev_id_list),))
test_rows = self.cur.fetchall()
logger.info("Len of test_rows: {}".format(len(test_rows)))
dev_ctx_id_list = list()
dev_update_q = "UPDATE all_qna SET is_fixed = 1 WHERE q_id = %s;"
for test_row in test_rows:
logger.debug("test_row[1]: {}".format(test_row[1]))
self.cur.execute(dev_update_q, (test_row[1],)) # update dev questions
dev_ctx_id_list.append(test_row[0])
insert_dev_ctx = "INSERT INTO dev_context_fix SELECT * FROM all_context_all WHERE id IN %s;"
self.cur.execute(insert_dev_ctx, (tuple(dev_ctx_id_list),))
insert_dev_q = "INSERT INTO dev_qna_fix SELECT * FROM all_qna " \
"WHERE q_id IN (SELECT q_id FROM all_qna WHERE c_id IN %s);"
self.cur.execute(insert_dev_q, (tuple(dev_ctx_id_list),))
def check_data(self, season):
workers = 30
r = 300
fetch_sql = "SELECT c_id, q_id, question_morph, answer_morph, answer_start_morph, answer_end_morph, c.context_morph " \
"FROM all_context_all c, all_qna q WHERE c.id=q.c_id AND q.q_id LIKE '{}_%-1' " \
"ORDER BY cast(c_id as unsigned), q_id;".format(season)
self.cur.execute(fetch_sql)
qas = self.cur.fetchall()
logger.info("Fetch all qns data finished")
#check_index(qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(check_index, qas, n, r) for n in range(0, len(qas), r)}
def make_plain_list(self, input_list):
return_list = list()
depth = 0
for x in input_list: # x: sentence
for y in x: # y: token
if type(y) != list:
return_list.append(y)
depth = 2
else:
for z in y: # z: morph
return_list.append(z)
depth = 3
return return_list, depth
def extract_passage(self, ctx, answer, location, c_id, q_id):
sentence_list = list()
is_skip = False
logger.info(answer)
processed_ans = self.nlp_analyze.get_tree_result(answer) # list
logger.debug(processed_ans)
processed_ans_plain, depth = self.make_plain_list(processed_ans)
processed_ans_plain = ['|/sw'] * 5 + processed_ans_plain + ['|/sw'] * 5 # plain list
if depth == 2:
processed_ans = [processed_ans]
logger.debug("processed_ans: {}".format(processed_ans))
logger.debug("processed_ans_plain: {}".format(processed_ans_plain))
ctx = "{}{}{}{}{}".format(ctx[:location], "|"*5, ctx[location:location+len(answer)], "|"*5, ctx[location+len(answer):])
processed_txt, sentence_list = self.nlp_analyze.get_tree_result(ctx, sentence_list=True)
logger.debug(processed_txt)
processed_txt_plain, depth = self.make_plain_list(processed_txt) # plain list
if depth == 2:
processed_txt = [processed_txt]
logger.debug("processed_ans: {}".format(processed_ans))
logger.debug("processed_ans_plain: {}".format(processed_ans_plain))
logger.debug("processed_txt: {}".format(processed_txt))
logger.debug("processed_txt_plain: {}".format(processed_txt_plain))
marker_idxes = [(j, j + 5) for j in range(len(processed_txt_plain))
if processed_txt_plain[j:j + 5] == ['|/sw'] * 5]
logger.debug(marker_idxes)
if len(marker_idxes) % 2 == 0:
if len(marker_idxes) == 2:
start_idx = marker_idxes[0][0]
end_idx = marker_idxes[1][1] - 10
else:
logger.critical("Not 2 markers...({}) skip: {}".format(len(marker_idxes), q_id))
is_skip = True
return 0, 0, 0, 0, is_skip
else:
logger.critical("Not 2 markers...({}) skip: {}".format(len(marker_idxes), q_id))
is_skip = True
return 0, 0, 0, 0, is_skip
logger.debug("start_idx: {}".format(start_idx))
logger.debug("end_idx: {}".format(end_idx))
for k in range(len(processed_txt)): # sentence
for l in range(len(processed_txt[k])): # token
logger.debug(processed_txt[k][l])
tmp_idxes = [(j, j + 5) for j in range(len(processed_txt[k][l]))
if processed_txt[k][l][j:j + 5] == ['|/sw'] * 5]
if len(tmp_idxes) != 0:
logger.debug(tmp_idxes)
new_processed_txt = self.remove_list_sequence(processed_txt[k][l], tmp_idxes)
logger.debug(new_processed_txt)
processed_txt[k][l] = new_processed_txt
#processed_txt[k][l] = list(filter('|/sw'.__ne__, processed_txt[k][l]))
logger.debug(processed_txt[k][l])
logger.debug(processed_txt)
final_answer = list()
cnt = 0
for k in range(len(processed_txt)):
tmp = list()
for l in range(len(processed_txt[k])):
tmp2 = list()
for m in range(len(processed_txt[k][l])): # morph
if cnt >= start_idx and cnt < end_idx:
logger.debug(processed_txt[k][l][m])
tmp2.append(processed_txt[k][l][m])
cnt += 1
if len(tmp2) > 0:
tmp.append(tmp2)
if len(tmp) > 0:
final_answer.append(tmp)
processed_txt_plain = self.remove_list_sequence(processed_txt_plain, marker_idxes)
#processed_txt_plain = list(filter('|/sw'.__ne__, processed_txt_plain))
final_answer_plain, depth = self.make_plain_list(final_answer)
try:
assert (processed_txt_plain[start_idx:end_idx] == final_answer_plain)
except AssertionError:
logger.error("{} != {}".format(processed_txt_plain[start_idx:end_idx],
final_answer_plain))
is_skip = True
return 0, 0, 0, 0, is_skip
logger.debug("answer_processed: {}".format(processed_txt_plain[start_idx:end_idx]))
logger.debug("answer_processed_return: {}".format(final_answer))
logger.debug(str(processed_txt))
return start_idx, end_idx, str(processed_txt), str(final_answer), is_skip, sentence_list
def remove_list_sequence(self, input_list, marker_idxes):
logger.debug(input_list)
logger.debug(marker_idxes)
new_ptp = list()
if len(marker_idxes) > 1:
for i in range(len(marker_idxes)):
if i == 0:
new_ptp += input_list[:marker_idxes[i][0]]
new_ptp += input_list[marker_idxes[i][1]:marker_idxes[i+1][0]]
logger.debug(input_list[:marker_idxes[i][0]])
else:
new_ptp += input_list[marker_idxes[i][1]:]
logger.debug(input_list[marker_idxes[i][1]:])
else:
new_ptp += input_list[:marker_idxes[0][0]]
new_ptp += input_list[marker_idxes[0][1]:]
logger.debug(new_ptp)
return new_ptp
def process_qa(self, type, season, table_name):
db_cnf_dict = {"host": 'localhost', "usr": "root", "pwd": "data~secret!", "db": table_name, "encoding": "utf8"}
self.connect_db(table_name)
if type == "q_only":
fetch_sql = "SELECT c_id, q_id, question FROM all_qna q " \
"WHERE q_id LIKE '%-2' AND question_morph IS NULL AND q_id LIKE '{}_%'" \
"ORDER BY cast(c_id as unsigned), q_id;".format(season)
elif type == "check_dp_length":
fetch_sql = "SELECT id, context, context_morph, context_dp " \
"FROM all_context_all c ORDER BY id;"
elif type == "dp":
fetch_sql = "SELECT id, context FROM all_context " \
"WHERE context_dp IS NULL AND cast(id AS unsigned) >= {} ORDER BY id;".format(self.start_id)
elif type == "dp_q":
fetch_sql = "SELECT c_id, q_id, question FROM all_qna " \
"WHERE question_dp IS NULL ORDER BY c_id, q_id;"
elif type == "patch":
#fetch_sql = "select c_id, q_id, answer, answer_start, context, context_morph from all_qna q, all_context c " \
# "where q.q_id LIKE '%-1' AND q.c_id = c.id ORDER BY c_id, q_id;"
fetch_sql = "SELECT c_id, q_id, answer, answer_start, context " \
"FROM (SELECT * FROM all_context_all WHERE context LIKE '%|%') t, all_qna q " \
"WHERE t.id = q.c_id " \
"ORDER BY c_id, q_id;"
elif type == "re_patch":
with open("check/re_patch_all.txt") as f: # 바꿔줌
qas = f.readlines()
elif type == "context":
# process only data created at certain season
fetch_sql = "select c_id, q_id, question, answer, answer_start, context from all_qna q, all_context c " \
"where q.q_id LIKE '%-1' AND q.q_id LIKE '{}_%' AND q.c_id = c.id AND question_morph is NULL and c.id> 0 " \
"ORDER BY CAST(c_id AS UNSIGNED), q_id;".format(season)
else:
logger.error("You select the wrong type({}). Please re-check your command".format(type))
exit()
if type != "re_patch":
self.cur.execute(fetch_sql)
qas = self.cur.fetchall()
logger.info("len of qas: {}".format(len(qas)))
workers = 20 # changed to 20
r = 300 # got an error, so reduced 300 to 200
if type == "q_only":
#update_set_q(table_name, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(update_set_q, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "dp":
#get_dp_multi("context", self.db_cnf_dict, qas, 0, len(qas))
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(get_dp_multi, "context", self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "dp_q":
#get_dp_multi("question", self.db_cnf_dict, qas, 0, len(qas))
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(get_dp_multi, "question", self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "patch":
#patch(table_name, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(patch, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "re_patch":
# re_patch(table_name, qas, 0, r) # the r value above must be increased to match the number of errors produced by re_patch.
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(re_patch, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "check_dp_length":
#check_dp_length(self.db_cnf_dict, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(check_dp_length, self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "context":
morph_core(table_name, qas, 0, r)
# with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
# fs = {exe.submit(morph_core, table_name, qas, n, r) for n in range(0, len(qas), r)}
def get_dp(self, q):
c_id = q[0]
ctx = q[1]
dp = self.nlp_analyze.get_dependency_parser_result(ctx)
sql = "UPDATE all_context SET context_dp = %s WHERE id = %s;"
self.cur.execute(sql, (str(dp), c_id))
def longtext(self):
sql = "SELECT id, context FROM all_context WHERE id = %s;"
self.cur.execute(sql, (2,))
row = self.cur.fetchone()
dp_content = self.nlp_analyze.get_dependency_parser_result(row[1])
update_sql = "UPDATE all_context SET context_dp = %s WHERE id = %s"
self.cur.execute(update_sql, (str(dp_content), row[0]))
logger.info("finished")
def create_dev_kang(self):
qid_list = list()
with open("dev_qids.txt") as f:
for line in f:
qid_list.append(line.strip())
qid_part = ', '.join(list(map(lambda x: '%s', qid_list)))
sql = "INSERT INTO dev_qna_kang SELECT * FROM all_qna " \
"WHERE q_id IN ({});".format(qid_part)
self.cur.execute(sql, qid_list)
cid_list = list()
for qid in qid_list:
sql = "SELECT c_id FROM all_qna WHERE q_id = %s;"
self.cur.execute(sql, (qid,))
c_id = self.cur.fetchone()[0]
logger.info(c_id)
cid_list.append(c_id)
cid_list = list(set(cid_list))
cid_part = ', '.join(list(map(lambda x: '%s', cid_list)))
sql = "INSERT INTO dev_context_kang SELECT * FROM all_context " \
"WHERE id IN ({});".format(cid_part)
self.cur.execute(sql, cid_list)
def check_dp_length(self, qas, n, r):
exec("j{} = SquadDb()".format(n))
processed_ctx_list = list()
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
# id, context, context_morph, context_dp
c_id = q[0]; ctx = q[1]; ctx_morph = q[2]; ctx_dp = q[3]
new_ctx = eval("j{}".format(n)).nlp_analyze.get_tree_result(ctx)
try:
assert(len([x for x in eval(ctx_dp) if x['id'] == 0]) == len(eval(ctx_morph)))
except AssertionError:
logger.critical("Different sentence length: {}".format(c_id))
with open("check/sentence_length.txt", "a") as f:
f.write("{}\n".format(c_id))
try:
assert(len([x for x in eval(ctx_dp) if x['id'] == 0]) == len(new_ctx))
except AssertionError:
logger.error("len of new_dp != len of ctx_morph: {}".format(c_id))
exit()
'''if "-" in c_id:
c_id = c_id.split("-")[0]
if c_id not in processed_ctx_list:
update_sql = "UPDATE all_context SET context_morph = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(update_sql, (str(new_ctx), c_id))
logger.info("ctx_morph update")'''
def re_patch(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q_line in qas[n:n+r]:
item = q_line.strip().split("\t")
fetch_sql = "select q.c_id, q.q_id, q.answer, q.answer_start, c.context from all_qna q, all_context c " \
"where q.c_id = %s AND q.q_id = %s AND q.c_id = c.id;"
eval("j{}".format(n)).cur.execute(fetch_sql, (item[0], item[1]))
q = eval("j{}".format(n)).cur.fetchone()
# q: c_id, q_id, answer, answer_start, context
c_id = q[0]; q_id = q[1]; answer = q[2]; answer_s = q[3]; ctx = q[4]
new_s, new_e, new_ctx, new_ans, is_skip, sentence_list = \
eval("j{}".format(n)).extract_passage(ctx, answer, answer_s, c_id, q_id)
sql = "INSERT INTO all_context_diff VALUE(%s, %s, %s);"
sql_diff = "SELECT u_id FROM all_context_diff WHERE depend_id = %s ORDER BY u_id DESC LIMIT 1;"
eval("j{}".format(n)).cur.execute(sql_diff, c_id)
uu = eval("j{}".format(n)).cur.fetchone()
if uu is None:
u_id = 1
else:
u_id = uu[0] + 1
eval("j{}".format(n)).cur.execute(sql, (c_id, u_id, new_ctx))
logger.info("Insert new c: {}-{}".format(c_id, u_id))
sql = "UPDATE all_qna SET c_id = %s WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, ("{}-{}".format(c_id, u_id), c_id, q_id))
eval("j{}".format(n)).cur.execute(sql, ("{}-{}".format(c_id, u_id), c_id, "{}2".format(q_id[:-1])))
logger.info("Update q: {}".format(q_id))
logger.debug(new_ans)
def patch(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
processed_ctx = list()
for q in qas[n:n+r]:
# q: c_id, q_id, answer, answer_start, context
c_id = q[0]; q_id = q[1]; answer = q[2]; answer_s = q[3]; ctx = q[4]
new_s, new_e, new_ctx, new_ans, is_skip, sentence_list = \
eval("j{}".format(n)).extract_passage(ctx, answer, answer_s, c_id, q_id)
if is_skip:
logger.error(q)
exit()
logger.info(new_ans)
if c_id not in processed_ctx:
if "-" in c_id:
depend_id = c_id.split("-")[0]; u_id = c_id.split("-")[1]
sql = "UPDATE all_context_diff SET context_morph = %s WHERE depend_id = %s AND u_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), depend_id, u_id))
else:
sql = "UPDATE all_context SET context_morph = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), c_id))
logger.info("Update c: {}".format(c_id))
sql = "UPDATE all_qna SET answer_start_morph = %s, answer_end_morph = %s, answer_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (new_s, new_e, str(new_ans), c_id, q_id))
logger.info("Update q: {}".format(q_id))
re_quotation = re.compile(r"\[+[\"\'](\[\[.+\]\])[\"\']\]+")
def check_index(qas, n, r):
exec("j{} = SquadDb()".format(n))
return_list = list()
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
ctx_plain, depth = eval("j{}".format(n)).make_plain_list(eval(q[6]))
answer_plain, depth = eval("j{}".format(n)).make_plain_list(eval(q[3]))
try:
assert (ctx_plain[q[4]:q[5]] == answer_plain)
except AssertionError:
return_list.append("{}\t{}\t{}\t{}".format(q[0], q[1], ctx_plain[q[4]:q[5]], answer_plain))
if len(return_list) != 0:
with open("check/re_patch_{}.txt".format(n), "a") as f:
f.write("\n".join(return_list))
f.write("\n")
def get_dp_multi(type, db_cnf, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db()".format(n))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
c_id = q[0]
if type == "context":
txt = q[1]
elif type == "question":
q_id = q[1]
txt = q[2]
else:
logger.error("get_dp_multi - type is wrong. stop process..")
exit()
dp = eval("j{}".format(n)).nlp_analyze.get_dependency_parser_result(txt)
logger.debug(dp)
if type == "context":
sql = "UPDATE all_context SET context_dp = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(dp), c_id))
elif type == "question":
sql = "UPDATE all_qna SET question_dp = %s WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(dp), c_id, q_id))
def update_set_q(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
question = q[2]
question_list = eval("j{}".format(n)).nlp_analyze.get_tree_result(question)
logger.debug(question_list)
fetch_sql = "SELECT answer_morph, answer_start_morph, answer_end_morph FROM all_qna " \
"WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(fetch_sql, [q[0], "{}1".format(q[1][:-1])]) # fetch '-1' info
original = eval("j{}".format(n)).cur.fetchone()
logger.debug(original)
update_sql = "UPDATE all_qna SET question_morph = %s, answer_morph = %s, " \
"answer_start_morph = %s, answer_end_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
val_tuple = (str(question_list), original[0], original[1], original[2], q[0], q[1])
logger.debug(val_tuple)
eval("j{}".format(n)).cur.execute(update_sql, val_tuple)
def morph_core(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
# c_id, q_id, question, answer, answer_start, context
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
question = q[2]
answer = q[3]
answer_start = q[4]
context = q[5]
try:
assert (context[answer_start:answer_start+len(answer)] == answer)
except AssertionError:
logger.info(q[1])
logger.critical("real answer: {}".format(answer))
logger.critical("extracted answer: {}".format(context[answer_start:answer_start+len(answer)]))
exit()
new_s, new_e, new_ctx, new_answer, isSkip, sentence_list = \
eval("j{}".format(n)).extract_passage(context, answer, answer_start, q[0], q[1])
logger.info("isskip: {}".format(isSkip))
if not isSkip:
# question
question_list = eval("j{}".format(n)).nlp_analyze.get_tree_result(question)
if q[0] not in eval("j{}".format(n)).processed_ctx_list:
sql = "UPDATE all_context SET context_morph = %s, context_sent = %s WHERE id = %s"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), str(sentence_list), q[0]))
eval("j{}".format(n)).processed_ctx_list.append(q[0])
sql = "UPDATE all_qna SET question_morph = %s, answer_morph = %s, " \
"answer_start_morph = %s, answer_end_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
val_tuple = (str(question_list), str(new_answer), new_s, new_e, q[0], q[1])
logger.debug(val_tuple)
eval("j{}".format(n)).cur.execute(sql, val_tuple)
time.sleep(0.2)
if __name__ == "__main__":
try:
mode = sys.argv[1]
season = sys.argv[2]
db_table = sys.argv[3]
json_input = sys.argv[4]
start_id = sys.argv[5]
data_type = sys.argv[6]
except: print("")
j = SquadDb()
j.connect_db(db_table)
if mode == "squad2db":
j.squad2db(json_input, int(start_id), season, data_type, db_table)
elif mode == "context":
j.process_qa('context', season, db_table)
elif mode == "q_only":
j.process_qa('q_only', season, db_table)
elif mode == "check_data":
j.check_data(season)
elif mode == "re_patch":
j.process_qa('re_patch', season, db_table)
logger.info("All finished")
| [
"[email protected]"
] | |
247c239100ef619a331be5d46ae4dabbf1f51393 | bf69394cc6015f2c8ac28ae927be2a83b96facf3 | /lib/utils/training_stats.py | 6aff48aa5ddbb6c269cd19eb13e3b1621d6a791a | [
"MIT"
] | permissive | fangyuan-ksgk/Detectron.pytorch | bf1133b73763ec682b4f219a857e81515d86ebf5 | e8dfb86fbc68d30b9f443bc6aec722c5e4ce301e | refs/heads/master | 2023-03-16T04:48:22.648717 | 2018-04-30T14:54:28 | 2018-04-30T14:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,618 | py | #!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import datetime
import numpy as np
from core.config import cfg
from utils.logging import log_stats
from utils.logging import SmoothedValue
from utils.timer import Timer
import utils.net as nu
class TrainingStats(object):
"""Track vital training statistics."""
def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
# Output logging period in SGD iterations
self.misc_args = misc_args
self.LOG_PERIOD = log_period
self.tblogger = tensorboard_logger
self.tb_ignored_keys = ['iter', 'eta']
self.iter_timer = Timer()
# Window size for smoothing tracked values (with median filtering)
self.WIN_SZ = 20
def create_smoothed_value():
return SmoothedValue(self.WIN_SZ)
self.smoothed_losses = defaultdict(create_smoothed_value)
self.smoothed_metrics = defaultdict(create_smoothed_value)
self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
def IterTic(self):
self.iter_timer.tic()
def IterToc(self):
return self.iter_timer.toc(average=False)
def ResetIterTimer(self):
self.iter_timer.reset()
def UpdateIterStats(self, model_out):
"""Update tracked iteration statistics."""
total_loss = 0
if cfg.FPN.FPN_ON:
loss_rpn_cls_value = 0
loss_rpn_bbox_value = 0
for k, loss in model_out['losses'].items():
assert loss.shape[0] == cfg.NUM_GPUS
loss = loss.mean(dim=0)
total_loss += loss
loss_data = loss.data[0]
self.smoothed_losses[k].AddValue(loss_data)
model_out['losses'][k] = loss
if k.startswith('loss_rpn_cls'):
loss_rpn_cls_value += loss_data
elif k.startswith('loss_rpn_bbox'):
loss_rpn_bbox_value += loss_data
self.smoothed_total_loss.AddValue(total_loss.data[0])
model_out['total_loss'] = total_loss
if cfg.FPN.FPN_ON:
self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_value)
self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_value)
for k, metric in model_out['metrics'].items():
metric = metric.mean(dim=0)
self.smoothed_metrics[k].AddValue(metric.data[0])
model_out['metrics'][k] = metric
def LogIterStats(self, cur_iter, lr):
"""Log the tracked statistics."""
if (cur_iter % self.LOG_PERIOD == 0 or
cur_iter == cfg.SOLVER.MAX_ITER - 1):
stats = self.GetStats(cur_iter, lr)
log_stats(stats, self.misc_args)
if self.tblogger:
self.tb_log_stats(stats, cur_iter)
def tb_log_stats(self, stats, cur_iter):
"""Log the tracked statistics to tensorboard"""
for k in stats:
if k not in self.tb_ignored_keys:
v = stats[k]
if isinstance(v, dict):
self.tb_log_stats(v, cur_iter)
else:
self.tblogger.add_scalar(k, v, cur_iter)
def GetStats(self, cur_iter, lr):
eta_seconds = self.iter_timer.average_time * (
cfg.SOLVER.MAX_ITER - cur_iter
)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
stats = OrderedDict(
iter=cur_iter + 1, # 1-indexed
time=self.iter_timer.average_time,
eta=eta,
loss=self.smoothed_total_loss.GetMedianValue(),
lr=lr,
)
stats['metrics'] = OrderedDict()
for k in sorted(self.smoothed_metrics):
stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
head_losses = []
rpn_losses = []
rpn_fpn_cls_losses = []
rpn_fpn_bbox_losses = []
for k, v in self.smoothed_losses.items():
toks = k.split('_')
if len(toks) == 2:
head_losses.append((k, v.GetMedianValue()))
elif len(toks) == 3:
rpn_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'cls':
rpn_fpn_cls_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'bbox':
rpn_fpn_bbox_losses.append((k, v.GetMedianValue()))
else:
raise ValueError("Unexpected loss key: %s" % k)
stats['head_losses'] = OrderedDict(head_losses)
stats['rpn_losses'] = OrderedDict(rpn_losses)
stats['rpn_fpn_cls_losses'] = OrderedDict(rpn_fpn_cls_losses)
stats['rpn_fpn_bbox_losses'] = OrderedDict(rpn_fpn_bbox_losses)
return stats
| [
"[email protected]"
] | |
58387329bb15b94260f2528c77fccfb21cdb8190 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /SqbyWYwqChQroXfhu_23.py | 85771451090fc8484ee47a515d24c515837f537c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | """
This challenge concerns _square matrices_ (same number of rows and columns) as
the below example illustrates:
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
The entries in the diagonal line from the top left to the bottom right form
the _main diagonal_ of the matrix. In this case, 1,5,9 form the main diagonal.
Write a function that returns the matrix obtained by replacing the entries
_above_ the main diagonal with 0s.
For example, for the matrix above you should return:
[
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
### Examples
lower_triang([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]) ➞ [
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
lower_triang([
[5, 7],
[7, 9]
]) ➞ [
[5, 0],
[7, 9]
]
lower_triang([
[1, 8, 8, 1],
[2, 7, 7, 2],
[3, 6, 6, 3],
[4, 5, 5, 4]
]) ➞ [
[1, 0, 0, 0],
[2, 7, 0, 0],
[3, 6, 6, 0],
[4, 5, 5, 4]
]
### Notes
* As in the examples, the size of the matrices will vary (but they will always be square).
* In Linear Algebra, matrices with 0s above the diagonal are called _lower triangular matrices_.
"""
def lower_triang(arr):
for i in range(len(arr)):
for j in range(len(arr[i])):
if j < i:
arr[j][i] = 0
return arr
| [
"[email protected]"
] | |
788398854e79143d77bd7bcbbc79202a74d49414 | 3e1beedf80c60153482192b086347d0530701c37 | /problem solving/cinema.py | cfee1d1abc3a0f3637d3211ba9876bb01e88668e | [] | no_license | rishi772001/Competetive-programming | ac130bde426844e09a3e5162e279d61278c7c502 | 3493991cac55f225eeee67dd49f1caed8211465c | refs/heads/master | 2023-04-12T14:59:59.447354 | 2021-04-30T05:05:13 | 2021-04-30T05:05:13 | 267,785,820 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | # https://leetcode.com/problems/cinema-seat-allocation/
n = 2
booked = [[1,6],[1,8],[1,3],[2,3],[1,10],[1,2],[1,5],[2,2],[2,4],[2,10],[1,7],[2,5]]
theatre = [[0]*10 for i in range(n)]
for i in range(len(booked)):
theatre[booked[i][0] - 1][booked[i][1] - 1] += 1
print(theatre)
count = 0
for i in range(len(theatre)):
sum = theatre[i][1] + theatre[i][2] + theatre[i][3] + theatre[i][4]
j = 5
flag = False
if sum == 0:
count += 1
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j = j + 3
while j < 10:
if j - 4 == 1 or j - 4 == 3 or j - 4 == 5:
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
continue
if (sum == 0):
count += 1
if(j + 4 < 10):
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j += 3
else:
break
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
print(count)
| [
"[email protected]"
] | |
ff54c9c913e9a0d876744c59e5036fa4992f106c | cbab8b9218b4c7965e6d1dacfb2104e4096c18d1 | /backend/helo_tesrt_dev_2404/urls.py | fbe6d1e20a84147bb29ba5acf7482a6bb158ce79 | [] | no_license | crowdbotics-apps/helo-tesrt-dev-2404 | 480408a7b426413ebaa8803e8e754efae08ea091 | 44599c5058dc7b284bdd3eaba3e38b2b5d5c33db | refs/heads/master | 2023-02-06T01:56:10.488868 | 2020-04-07T15:51:58 | 2020-04-07T15:51:58 | 253,755,570 | 0 | 0 | null | 2023-01-24T01:57:30 | 2020-04-07T10:04:28 | JavaScript | UTF-8 | Python | false | false | 1,929 | py | """helo_tesrt_dev_2404 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "helo tesrt"
admin.site.site_title = "helo tesrt Admin Portal"
admin.site.index_title = "helo tesrt Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="helo tesrt API",
default_version="v1",
description="API documentation for helo tesrt App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
a75ea16fc96deba8ed6592b54837c4622da43d54 | b9a2097b1ff526f0f980cb44f321ecdecc071baf | /backend/manage.py | 4772a9003f0358346446794a2b58b87dce416f4e | [] | no_license | crowdbotics-apps/nwh-elkhart-metrics-26614 | ce08c984d6c939b7f7cd5158b5c39fe37be94dcc | e86088482281f83fe789ce0b492e76981df1c08c | refs/heads/master | 2023-05-01T08:17:44.464562 | 2021-05-12T18:42:43 | 2021-05-12T18:42:43 | 366,794,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "nwh_elkhart_metrics_26614.settings"
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c697d125b0367a7834e07b1127c2335e79570e79 | 2dfbb018568209864544375de59a157c8752689a | /skimreads/comments/migrations/0002_auto__del_field_comment_reading__add_field_comment_note.py | 7b339a69401d147362ef5085086bdfaa985c3fb8 | [] | no_license | tommydangerous/skimreads | 7df4bde603c6122f20242d4591357802a4484f9f | 6e73341ab034b52bb48cde4f076948946944d2a9 | refs/heads/master | 2020-05-17T23:20:15.020065 | 2014-09-27T06:28:34 | 2014-09-27T06:28:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,250 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Comment.reading'
db.delete_column('comments_comment', 'reading_id')
# Adding field 'Comment.note'
db.add_column('comments_comment', 'note',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['readings.Note']),
keep_default=False)
def backwards(self, orm):
# Adding field 'Comment.reading'
db.add_column('comments_comment', 'reading',
self.gf('django.db.models.fields.related.ForeignKey')(default='', to=orm['readings.Reading']),
keep_default=False)
# Deleting field 'Comment.note'
db.delete_column('comments_comment', 'note_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Note']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'readings.note': {
'Meta': {'ordering': "['created']", 'object_name': 'Note'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reading': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Reading']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'readings.reading': {
'Meta': {'object_name': 'Reading'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
}
}
complete_apps = ['comments'] | [
"[email protected]"
] | |
95085f0f6148d3aeba523e3cba43e37d56a4cc60 | 908336e941d7d95d2ff168f8d132bf5656b87752 | /datasets/weibo_senti_100k/parse.py | ad104fde40447e6e0371f6df42df4903898a6e89 | [] | no_license | cyy0523xc/ChineseNlpCorpus | 364437b5662bc0a138281afc817b375c50a7fecf | a027225e9caf963d0d4e38d96b402ce515505850 | refs/heads/master | 2020-03-22T05:45:09.343135 | 2018-12-18T02:14:08 | 2018-12-18T02:14:08 | 139,587,654 | 2 | 0 | null | 2018-07-03T13:29:28 | 2018-07-03T13:29:27 | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
#
#
# Author: alex
# Created Time: 2018-07-03 Tuesday 21:51:54
import csv
with open('./weibo_senti_100k.csv', encoding='utf8') as r, \
open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_pos.txt', 'w', encoding='utf8') as pos, \
open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_neg.txt', 'w', encoding='utf8') as neg:
for row in csv.DictReader(r):
content = row['review'].replace("\n", ' ').strip() + "\n"
if row['label'] == '1':
pos.write(content)
else:
neg.write(content)
print('ok')
| [
"[email protected]"
] | |
fa6df867465274ac8444a135a311aa00afd86d2c | 48d08e7c20628479ea69b4a1a51f99a3db26c79d | /MathPy/04_sympy_intro.py | 9a8b4683b4bf945cb6a3376f182c1efe1b83b73d | [] | no_license | PyRPy/stats_py | 59ae0975c5b549fb47f7630b1f232caf715fe2ff | 0c87ebf7f84eb7a21bcedb3234170ef220ca2f14 | refs/heads/master | 2022-09-27T21:01:53.316765 | 2022-09-17T01:52:09 | 2022-09-17T01:52:09 | 167,268,454 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from sympy import Symbol
# ------------------Defining Symbols and Symbolic Operations -------------------
x = Symbol('x')
print(x + x + 1)
a = Symbol('x')
print(a + a + 1)
# find the original symbol object
print(a.name)
# define multiple symbols
from sympy import symbols
x, y, z = symbols('x, y, z')
s = x*(x + y) + x*(y + z)
print(s)
print(x*x*(1 + x))
| [
"[email protected]"
] | |
34019fe74d66ee473c9f78d9730d9b933cee8973 | 4007a7626ccb18480e73ac304b0010f6aeba33fb | /proj_preproc/db.py | cb9defbbd56297c1bca17df0cef2ee205afdb103 | [] | no_license | olmozavala/air_pollution_forecast | 68030748b975d463158f1ce7c7f16eb038493ced | 5b543b3f4a190d7ae33a55c4f5b30f56b17347c3 | refs/heads/master | 2023-07-22T15:16:31.166036 | 2023-06-08T18:55:14 | 2023-06-08T18:55:14 | 226,166,662 | 0 | 0 | null | 2023-02-16T18:40:24 | 2019-12-05T18:41:50 | Python | UTF-8 | Python | false | false | 129 | py | def eliminateNonContinuousTimes(data, numhours):
"""It eliminates those 'rows' that do not contain 'numhours' continuously""" | [
"[email protected]"
] | |
bdce9ca6acb87cf1e40299efade42b89dec4c38a | 9de27e623c85b0d55da4afe4d843fe321b77954d | /Configuration/Geometry/python/GeometryDD4hepExtended2026D76_cff.py | 1905e4de5a22bdb37bd5f0728b7fe57a842f0dc7 | [
"Apache-2.0"
] | permissive | PFCal-dev/cmssw | a97d566d691bc5ac900e48c632f4e87a005f94a2 | 232187f0f8a201210426312b27a1b62e55b6084c | refs/heads/hgc-tpg-devel-CMSSW_12_0_0_pre3 | 2022-06-01T08:27:39.166655 | 2021-11-23T15:28:18 | 2021-11-23T15:28:18 | 14,498,276 | 4 | 7 | Apache-2.0 | 2022-02-08T11:01:38 | 2013-11-18T16:34:32 | C++ | UTF-8 | Python | false | false | 924 | py | import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2026Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Configuration.Geometry.GeometryDD4hep_cff import *
DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D76.xml")
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *
from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *
from Geometry.MuonNumbering.muonGeometryConstants_cff import *
from Geometry.MuonNumbering.muonOffsetESProducer_cff import *
from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *
| [
"[email protected]"
] | |
bc47d286fda4479958fbd49dd8f596957c627662 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/application/scoutevent/resultanim.py | e2efa0cc4796d7c81b9432230cea05603a9db449 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.util.cabareterror import CabaretError
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.app.cabaret.views.application.scoutevent.base import ScoutHandler
import urllib
from defines import Defines
import settings_sub
class Handler(ScoutHandler):
"""スカウト結果.
引数:
実行したスカウトのID.
確認キー.
結果のindex.
"""
@classmethod
def getViewerPlayerClassList(cls):
return []
def process(self):
self.__swf_params = {}
args = self.getUrlArgs('/sceventresultanim/')
try:
stageid = int(args.get(0))
scoutkey = urllib.unquote(args.get(1))
index = int(args.get(2) or 0)
except:
raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)
v_player = self.getViewerPlayer()
model_mgr = self.getModelMgr()
using = settings.DB_READONLY
flag_skip = BackendApi.get_scoutskip_flag(v_player.id)
eventmaster = BackendApi.get_current_scouteventmaster(model_mgr, using=using)
if eventmaster is None:
raise CabaretError(u'Event Closed.', CabaretError.Code.EVENT_CLOSED)
mid = eventmaster.id
        # Progress data.
playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using)
if playdata and playdata.confirmkey == scoutkey:
            # Should be re-read from the DB.
playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using=settings.DB_DEFAULT, reflesh=True)
if playdata is None or playdata.alreadykey != scoutkey:
if settings_sub.IS_LOCAL:
raise CabaretError(u'キーが正しくありません %s vs %s' % (playdata.alreadykey if playdata else 'None', scoutkey))
url = self.makeAppLinkUrlRedirect(UrlMaker.scoutevent())
self.appRedirect(url)
return
eventlist = playdata.result.get('event', [])[index:]
if not eventlist:
raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)
table = {
Defines.ScoutEventType.COMPLETE : (self.procComplete, False),
Defines.ScoutEventType.LEVELUP : (self.procLevelup, True),
Defines.ScoutEventType.HAPPENING : (self.procHappening, True),
}
proc = None
next_event = None
for idx, event in enumerate(eventlist):
next_event = eventlist[idx+1] if (idx + 1) < len(eventlist) else None
tmp = table.get(event.get_type(), None)
if tmp is None:
index += idx
break
tmp_proc, is_skipok = tmp
if flag_skip and is_skipok:
continue
index += idx
proc = tmp_proc
break
if not proc:
url = UrlMaker.scouteventresult(stageid, scoutkey)
self.appRedirect(self.makeAppLinkUrlRedirect(url))
return
if next_event and table.has_key(next_event.get_type()):
url = UrlMaker.scouteventresultanim(stageid, scoutkey, index+1)
else:
url = UrlMaker.scouteventresult(stageid, scoutkey)
self.__swf_params['backUrl'] = self.makeAppLinkUrl(url)
self.__playdata = playdata
proc(event)
def procComplete(self, event):
"""スカウト完了演出.
"""
self.__swf_params['text'] = Defines.EffectTextFormat.SCOUTRESULT_COMPLETE_TEXT
self.appRedirectToEffect('scoutclear/effect.html', self.__swf_params)
def procLevelup(self, event):
"""レベルアップ演出.
"""
resulttexts = []
        # Level information.
resulttexts.append(Defines.EffectTextFormat.LEVELUP_STATUSTEXT % event.level)
self.__swf_params['statusText'] = u'\n'.join(resulttexts)
self.appRedirectToEffect('levelup/effect.html', self.__swf_params)
def procHappening(self, event):
"""ハプニング発生演出.
"""
self.appRedirectToEffect('chohutokyaku/effect.html', self.__swf_params)
def main(request):
return Handler.run(request)
| [
"[email protected]"
] | |
d1059b3e7acad5c413ee529d9f6dcd5d530089a0 | b2135e3fc77666f043f0fbafd0d88ed9865d5b4f | /7183 Python Basics/05 Chapter 1.5 - About Lists/01 List basics/78629_06_code.py | 6793446fdad2714f94b65334724482bacae4071d | [] | no_license | Felienne/spea | 164d05e9fbba82c7b7df8d00295f7157054f9248 | ecb06c66aaf6a2dced3f141ca415be9efb7dbff5 | refs/heads/master | 2020-03-17T17:35:27.302219 | 2018-05-17T10:14:49 | 2018-05-17T10:14:49 | 133,794,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # What is the length of an empty list?
nothing = []
length = len(nothing)
assertEqual(length, __) | [
"[email protected]"
] | |
6e6f3dccdbcc5e1215398c4c2605d64ab759adb7 | 4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8 | /python/decorator/12.py | 88fcefe3e80c692e7cf23963349684364a407982 | [
"MIT"
] | permissive | gozeon/code-collections | 464986c7765df5dca980ac5146b847416b750998 | 13f07176a6c7b6ac13586228cec4c1e2ed32cae4 | refs/heads/master | 2023-08-17T18:53:24.189958 | 2023-08-10T04:52:47 | 2023-08-10T04:52:47 | 99,432,793 | 1 | 0 | NOASSERTION | 2020-07-17T09:25:44 | 2017-08-05T15:56:53 | JavaScript | UTF-8 | Python | false | false | 395 | py | #coding=utf-8
# -*- coding=utf-8 -*-
from functools import wraps
def my_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
'''decorator'''
print('Calling decorated function...')
return func(*args, **kwargs)
return wrapper
@my_decorator
def example():
"""Docstring"""
print('Called example function')
print(example.__name__, example.__doc__)
| [
"[email protected]"
] | |
649d76925e81b3a260732ead9f7c2f79e696308d | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/api/ddboost_storage_units_api.py | 9c5ab1a86a2c4d83f0fe5eb6e1108b8fe66de0c0 | [] | no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,436 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dd_sdk_1_0.api_client import ApiClient
class DdboostStorageUnitsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get(self, system_id, **kwargs): # noqa: E501
"""Get DDBoost storage unit information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get(system_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param int page: page number, starting from 0 @#$type=xs:unsignedInt
:param int size: Paging size @#$type=xs:unsignedInt
:param str sort: sort=\"name,role\". For descending order, prefix the key with a dash (-). Ex: -name @#$type=ddboostStorageUnitSortQuery
:param str filter: filter=\"name=value\". value should be a valid regular expression. @#$type=ddboostStorageUnitFilterQuery
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfos
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, **kwargs) # noqa: E501
return data
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(self, system_id, **kwargs): # noqa: E501
"""Get DDBoost storage unit information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get_with_http_info(system_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param int page: page number, starting from 0 @#$type=xs:unsignedInt
:param int size: Paging size @#$type=xs:unsignedInt
:param str sort: sort=\"name,role\". For descending order, prefix the key with a dash (-). Ex: -name @#$type=ddboostStorageUnitSortQuery
:param str filter: filter=\"name=value\". value should be a valid regular expression. @#$type=ddboostStorageUnitFilterQuery
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfos
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'authorization', 'x_dd_auth_token', 'page', 'size', 'sort', 'filter', 'exclude_fields', 'include_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and ('page' in params and params['page'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `page` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('size' in params and params['size'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `size` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('sort' in params and not re.search(r'^(\\s*-?(name)\\s*,)*\\s*-?(name)\\s*$', params['sort'])): # noqa: E501
raise ValueError("Invalid value for parameter `sort` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^(\\s*-?(name)\\s*,)*\\s*-?(name)\\s*$/`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in params and not re.search(r'^(\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s+[aA][nN][dD]\\s+)*\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s*$', params['filter'])): # noqa: E501
raise ValueError("Invalid value for parameter `filter` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^(\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s+[aA][nN][dD]\\s+)*\\s*(name)\\s*=\\s*(\\S*|(\\([^,]*,[^\\)]*\\))|(\"([^\"]*(\\\")*)*\"))\\s*$/`") # noqa: E501
if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
if 'include_fields' in params:
query_params.append(('include_fields', params['include_fields'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfos', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete(self, system_id, id, **kwargs): # noqa: E501
"""Delete a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: ServiceStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, **kwargs) # noqa: E501
return data
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(self, system_id, id, **kwargs): # noqa: E501
"""Delete a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete_with_http_info(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: ServiceStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`") # noqa: E501
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_delete`, length must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
if 'id' in params:
path_params['ID'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ServiceStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get(self, system_id, id, **kwargs): # noqa: E501
"""Get a DDBoost storage unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfoDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
return data
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(self, system_id, id, **kwargs): # noqa: E501
"""Get a DDBoost storage unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfoDetail
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token', 'exclude_fields', 'include_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`") # noqa: E501
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
if 'id' in params:
path_params['ID'] = params['id'] # noqa: E501
query_params = []
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
if 'include_fields' in params:
query_params.append(('include_fields', params['include_fields'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfoDetail', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put(self, system_id, id, ddboost_storage_unit_modify, **kwargs): # noqa: E501
"""Modify a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put(system_id, id, ddboost_storage_unit_modify, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
return data
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(self, system_id, id, ddboost_storage_unit_modify, **kwargs): # noqa: E501
"""Modify a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'id', 'ddboost_storage_unit_modify', 'authorization', 'x_dd_auth_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
# verify the required parameter 'ddboost_storage_unit_modify' is set
if self.api_client.client_side_validation and ('ddboost_storage_unit_modify' not in params or
params['ddboost_storage_unit_modify'] is None): # noqa: E501
raise ValueError("Missing the required parameter `ddboost_storage_unit_modify` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_id_put`, length must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
if 'id' in params:
path_params['ID'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'ddboost_storage_unit_modify' in params:
body_params = params['ddboost_storage_unit_modify']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post(self, system_id, ddboost_storage_unit_create, **kwargs): # noqa: E501
"""Create a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post(system_id, ddboost_storage_unit_create, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, **kwargs) # noqa: E501
else:
(data) = self.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, **kwargs) # noqa: E501
return data
def rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(self, system_id, ddboost_storage_unit_create, **kwargs): # noqa: E501
"""Create a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'ddboost_storage_unit_create', 'authorization', 'x_dd_auth_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`") # noqa: E501
# verify the required parameter 'ddboost_storage_unit_create' is set
if self.api_client.client_side_validation and ('ddboost_storage_unit_create' not in params or
params['ddboost_storage_unit_create'] is None): # noqa: E501
raise ValueError("Missing the required parameter `ddboost_storage_unit_create` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v10_dd_systems_systemid_protocols_ddboost_storage_units_post`, length must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'ddboost_storage_unit_create' in params:
body_params = params['ddboost_storage_unit_create']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v1.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get(self, system_id, id, **kwargs): # noqa: E501
"""Get a DDBoost storage unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
else:
(data) = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, **kwargs) # noqa: E501
return data
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(self, system_id, id, **kwargs): # noqa: E501
"""Get a DDBoost storage unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get_with_http_info(system_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str exclude_fields: Comma separated list of fields to be excluded from response object. Required and general fields such as paging will not be excluded. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:param str include_fields: Comma separated list of fields to be included in response object. For example, \"msg, status,severity\" for an alert. @#$type=commaSeparatedStrings
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'id', 'authorization', 'x_dd_auth_token', 'exclude_fields', 'include_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`") # noqa: E501
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and ('exclude_fields' in params and not re.search(r'^([^,]+,*)+$', params['exclude_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `exclude_fields` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
if self.api_client.client_side_validation and ('include_fields' in params and not re.search(r'^([^,]+,*)+$', params['include_fields'])): # noqa: E501
raise ValueError("Invalid value for parameter `include_fields` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_get`, must conform to the pattern `/^([^,]+,*)+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
if 'id' in params:
path_params['ID'] = params['id'] # noqa: E501
query_params = []
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
if 'include_fields' in params:
query_params.append(('include_fields', params['include_fields'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfoDetail20', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put(self, system_id, id, ddboost_storage_unit_modify, **kwargs): # noqa: E501
"""Modify a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put(system_id, id, ddboost_storage_unit_modify, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
else:
(data) = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, **kwargs) # noqa: E501
return data
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(self, system_id, id, ddboost_storage_unit_modify, **kwargs): # noqa: E501
"""Modify a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put_with_http_info(system_id, id, ddboost_storage_unit_modify, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param str id: ddboost storage units identifier. @#$type=xs:string (required)
:param DdboostStorageUnitModify ddboost_storage_unit_modify: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'id', 'ddboost_storage_unit_modify', 'authorization', 'x_dd_auth_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
# verify the required parameter 'ddboost_storage_unit_modify' is set
if self.api_client.client_side_validation and ('ddboost_storage_unit_modify' not in params or
params['ddboost_storage_unit_modify'] is None): # noqa: E501
raise ValueError("Missing the required parameter `ddboost_storage_unit_modify` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_id_put`, length must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
if 'id' in params:
path_params['ID'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'ddboost_storage_unit_modify' in params:
body_params = params['ddboost_storage_unit_modify']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units/{ID}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfoDetail20', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post(self, system_id, ddboost_storage_unit_create, **kwargs): # noqa: E501
"""Create a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post(system_id, ddboost_storage_unit_create, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, **kwargs) # noqa: E501
else:
(data) = self.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, **kwargs) # noqa: E501
return data
def rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(self, system_id, ddboost_storage_unit_create, **kwargs): # noqa: E501
"""Create a Storage Unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post_with_http_info(system_id, ddboost_storage_unit_create, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str system_id: DD system identifier. @#$type=xs:string (required)
:param DdboostStorageUnitCreate ddboost_storage_unit_create: (required)
:param str authorization: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:param str x_dd_auth_token: Clients need to specify Authorization or X-DD-AUTH-TOKEN. @#$type=xs:string
:return: DdboostStorageUnitInfoDetail20
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['system_id', 'ddboost_storage_unit_create', 'authorization', 'x_dd_auth_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if self.api_client.client_side_validation and ('system_id' not in params or
params['system_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `system_id` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`") # noqa: E501
# verify the required parameter 'ddboost_storage_unit_create' is set
if self.api_client.client_side_validation and ('ddboost_storage_unit_create' not in params or
params['ddboost_storage_unit_create'] is None): # noqa: E501
raise ValueError("Missing the required parameter `ddboost_storage_unit_create` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`") # noqa: E501
if self.api_client.client_side_validation and ('x_dd_auth_token' in params and
len(params['x_dd_auth_token']) < 1):
raise ValueError("Invalid value for parameter `x_dd_auth_token` when calling `rest_v20_dd_systems_systemid_protocols_ddboost_storage_units_post`, length must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'system_id' in params:
path_params['SYSTEM-ID'] = params['system_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'x_dd_auth_token' in params:
header_params['X-DD-AUTH-TOKEN'] = params['x_dd_auth_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'ddboost_storage_unit_create' in params:
body_params = params['ddboost_storage_unit_create']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/rest/v2.0/dd-systems/{SYSTEM-ID}/protocols/ddboost/storage-units', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DdboostStorageUnitInfoDetail20', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
e4b9ae8070fb64421cd1a17b81be4ca33bd507bd | b3b066a566618f49ae83c81e963543a9b956a00a | /Intermediate Data Visualization with Seaborn/04_Creating Plots on Data Aware Grids/04_Building a PairGrid.py | 9a654d32b4feb9d7dc4923d50e47cef330e416b7 | [] | no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 1,354 | py | """
Building a PairGrid
When exploring a dataset, one of the earliest tasks is exploring the relationship between pairs of variables. This step is normally a precursor to additional investigation.
Seaborn supports this pair-wise analysis using the PairGrid. In this exercise, we will look at the Car Insurance Premium data we analyzed in Chapter 1. All data is available in the df variable.
Instructions 1/2
Compare "fatal_collisions" to "premiums" by using a scatter plot mapped to a PairGrid()."""
# Create a PairGrid with a scatter plot for fatal_collisions and premiums
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map(plt.scatter)
plt.show()
plt.clf()
"""
Create another PairGrid but plot a histogram on the diagonal and scatter plot on the off diagonal.
"""
# Create the same PairGrid but map a histogram on the diag
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map_diag(plt.hist)
g3 = g2.map_offdiag(plt.scatter)
plt.show()
plt.clf()
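# A hedged extra illustration (not part of the original exercise): a PairGrid can also map
# different plot types onto its upper and lower triangles. The column names below are the
# same insurance dataframe columns used above.
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map_upper(plt.scatter)
g3 = g2.map_lower(sns.kdeplot)
g4 = g3.map_diag(plt.hist)
plt.show()
plt.clf()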
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================# | [
"Your-Email"
] | Your-Email |
ed82d43819a50cc8adfb850789934e1c87866bb5 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_sqrt/trend_poly/cycle_0/ar_12/test_artificial_128_sqrt_poly_0_12_100.py | d9d41e9db30a39012110217754bd29484e6124f5 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 128 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 0, transform = "sqrt", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset); | [
"[email protected]"
] | |
8e578b5c5e911fbe8995ba795536316e66e5a61b | 0ee72dc1b03070e25d3036bf6b562fc9b809ee72 | /freeze/__init__.py | 18f5610c310bc0963162bfdbec6dfe13797a4bdd | [
"MIT"
] | permissive | fabiocaccamo/django-freeze | d36a9c7a9e197b23fa63dc77f0901aba89e4dfaf | c2d5dfbf38b072d79e1a37489b07e91c8af9461c | refs/heads/main | 2023-08-29T12:50:19.069297 | 2023-07-18T07:35:52 | 2023-07-18T07:35:52 | 44,330,200 | 91 | 19 | MIT | 2023-09-08T13:52:25 | 2015-10-15T16:20:55 | Python | UTF-8 | Python | false | false | 269 | py | from freeze.metadata import (
__author__,
__copyright__,
__description__,
__license__,
__title__,
__version__,
)
__all__ = [
"__author__",
"__copyright__",
"__description__",
"__license__",
"__title__",
"__version__",
]
| [
"[email protected]"
] | |
983525aeb3a369cf1bd12f914b3440516b86d99a | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/0670.0_Maximum_Swap.py | 33229f1e40f8b51a167f593d9fa16ce58ddebd89 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | '''
Runtime: 36 ms, faster than 68.71% of Python3 online submissions
Memory usage: 14.9 MB, less than 49.11% of Python3 online submissions
Test cases passed: 111 / 111
'''
class Solution:
def maximumSwap(self, num: int) -> int:
digits = list(str(num))
s_digits = sorted(digits, reverse=True)
if digits == s_digits:
return num
def max_index_after(i):
m = i
for j in range(i + 1, len(digits)):
if digits[j] >= digits[m]:
m = j
return m
n = len(digits)
for i in range(n - 1):
j = max_index_after(i)
if digits[i] < digits[j]:
digits[i], digits[j] = digits[j], digits[i]
break
return int(''.join(digits))
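if __name__ == '__main__':
    # Hedged quick check (not part of the original submission): the classic example.
    assert Solution().maximumSwap(2736) == 7236   # swap the 2 and the 7
    assert Solution().maximumSwap(9973) == 9973   # already maximal, no swap helps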
| [
"[email protected]"
] | |
3c5b293c6d389c7c7dc932dead2e0c0535d49fc5 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/shared/formatters/__init__.py | 95539a9befb1c59e1830abe651b2f06f1e199360 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 776 | py | # 2015.11.10 21:29:02 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/formatters/__init__.py
import BigWorld
from gui.shared.formatters import icons
from gui.shared.formatters import text_styles
from gui.shared.formatters import time_formatters
__all__ = ('icons', 'text_styles', 'time_formatters')
def getClanAbbrevString(clanAbbrev):
return '[{0:>s}]'.format(clanAbbrev)
def getGlobalRatingFmt(globalRating):
if globalRating >= 0:
return BigWorld.wg_getIntegralFormat(globalRating)
return '--'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\formatters\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:02 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
3bb2bda63bb05e17d287b72bc50bda27aba736b4 | 18fe3f034f203bc8a22d08f15b29297ebcc7dfaf | /py/qlazypy/lib/densop_mcx.py | e7a3b8c9e9d186908e9c3aa923a516079358fdfc | [
"Apache-2.0"
] | permissive | katou-boop/qlazy | b8802c48b0cba0ba89cc1e1a69f551e0f4fdcc73 | 6b62fff65939a589603af7ed8be921c9f1669bb3 | refs/heads/master | 2023-02-17T12:30:05.419650 | 2021-01-17T23:20:20 | 2021-01-17T23:20:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # -*- coding: utf-8 -*-
from qlazypy.error import *
from qlazypy.config import *
from qlazypy.util import *
# multi-controlled X gate
def __gray_code(de, n):
for k in range(2**n):
yield k^(k>>1)
def densop_mcx(de,qid=[]):
# controled and target register
qid_ctr = qid[:-1]
qid_tar = qid[-1]
# hadamard
de.h(qid_tar)
# controlled-RZ(psi), psi=pi/(2**(bitnum-1))
bitnum = len(qid_ctr)
psi = 1.0/(2**(bitnum-1)) # unit=pi(radian)
gray_pre = 0
for gray in __gray_code(de, bitnum):
if gray == 0:
continue
msb = len(str(bin(gray)))-3
chb = len(str(bin(gray^gray_pre)))-3
if gray != 1:
if chb == msb:
chb -= 1
de.cx(qid_ctr[chb], qid_ctr[msb])
de.cp(qid_ctr[msb], qid_tar, phase=psi)
psi = -psi
gray_pre = gray
# hadamard
de.h(qid_tar)
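# Hedged illustration (not part of qlazy): the gray-code ordering that drives the
# controlled-phase decomposition above, printed for a 3-bit control register.
if __name__ == '__main__':
    bitnum = 3
    codes = [k ^ (k >> 1) for k in range(2 ** bitnum)]
    print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4] -- successive codes differ in exactly one bit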
| [
"[email protected]"
] | |
6db1394c31c689f64f58cffb4a65caedab7887b6 | 55c46d50ed426a3dccef8c44904df4524de43aa1 | /oldp/apps/cases/api_views.py | ff02e857cdc98c72953bdf206858a565d1bfcd76 | [
"MIT"
] | permissive | docsuleman/oldp | 1a438a9c669a54aab2f76133200e566d627d9668 | 8dcaa8e6e435794c872346b5014945ace885adb4 | refs/heads/master | 2020-06-29T10:45:18.787344 | 2019-08-04T18:21:02 | 2019-08-04T18:21:02 | 200,513,942 | 0 | 0 | MIT | 2019-08-04T17:36:52 | 2019-08-04T16:07:25 | Python | UTF-8 | Python | false | false | 2,439 | py | import coreapi
import coreschema
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.filters import HaystackFilter
from drf_haystack.generics import HaystackGenericAPIView
from rest_framework import viewsets
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ViewSetMixin
from oldp.api import SmallResultsSetPagination
from oldp.apps.cases.filters import CaseAPIFilter
from oldp.apps.cases.models import Case
from oldp.apps.cases.search_indexes import CaseIndex
from oldp.apps.cases.serializers import CaseSerializer, CASE_API_FIELDS, CaseSearchSerializer
from oldp.apps.search.filters import SearchSchemaFilter
class CaseViewSet(viewsets.ModelViewSet):
"""
List view for cases
"""
pagination_class = SmallResultsSetPagination # limit page (other content field blows up response size)
queryset = Case.get_queryset()
serializer_class = CaseSerializer
# lookup_field = 'slug'
filter_backends = (OrderingFilter, DjangoFilterBackend, )
filterset_class = CaseAPIFilter
ordering_fields = ('date', )
@method_decorator(cache_page(60))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_queryset(self):
return Case.get_queryset()\
.select_related('court')\
.only(*CASE_API_FIELDS)
class CaseSearchSchemaFilter(SearchSchemaFilter):
search_index_class = CaseIndex
def get_default_schema_fields(self):
return [
# Search query field is required
coreapi.Field(
name='text',
location='query',
required=True,
schema=coreschema.String(description='Search query on text content (Lucence syntax support).'),
)
]
class CaseSearchViewSet(ListModelMixin, ViewSetMixin, HaystackGenericAPIView):
"""
Search view (list only)
"""
permission_classes = (AllowAny,)
pagination_class = SmallResultsSetPagination # limit page (other content field blows up response size)
index_models = [
Case
]
serializer_class = CaseSearchSerializer
filter_backends = (HaystackFilter, CaseSearchSchemaFilter,)
| [
"[email protected]"
] | |
fc696582a78cdd7c5d1899b2b36105b5ae57fb27 | cc2029f40a12e82712072275fc76a07ac59b5940 | /battles/tourneys/20170409_2015.py | 47a50e202ae61271c8e51095af49e9ed277655a0 | [
"MIT"
] | permissive | heitorchang/learn-code | d3fb8e45d539d302372126fe28e85032590b5707 | 5e6e56f7257de1910830619c01d470e892d7f9d8 | refs/heads/master | 2023-08-09T13:46:18.623772 | 2023-07-21T16:57:11 | 2023-07-21T16:57:11 | 147,522,837 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | from math import log
description = """
You are playing a number guessing game with your friend. Your friend thought of some integer x from 1 to n. In order to guess the number, you can ask two types of questions:
"is x smaller or equal to a?" for some integer a;
"is x greater or equal to a?" for some integer a.
If the answer to your question is "yes", you should pay your friend $2, otherwise you should pay him $1.
How much will you have to pay to your friend, assuming that you apply the strategy that minimizes the amount of money you have to pay in order to guess the number in the worst case scenario?
"""
def numberGuessingNaive(n):
# solution by sensytive
p=[0]*(n+1)
for i in range(2,n+1):
p[i]=i
for m in range(1,i):
pr('p[i] 1+p[m] 2+p[i-m]')
p[i] = min(p[i], max(1+p[m], 2+p[i-m]))
pr('p')
return p[-1]
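# Hedged sketch (illustrative only): the same recurrence as numberGuessingNaive written
# recursively, without the pr/testeql debug helpers used elsewhere in this file.
# cost(n) = min over m in 1..n-1 of max(1 + cost(m), 2 + cost(n - m)); cost(0) = cost(1) = 0.
def numberGuessingRecursive(n, memo=None):
    memo = {} if memo is None else memo
    if n <= 1:
        return 0
    if n not in memo:
        memo[n] = min(max(1 + numberGuessingRecursive(m, memo),
                          2 + numberGuessingRecursive(n - m, memo))
                      for m in range(1, n))
    return memo[n]
# e.g. numberGuessingRecursive(3) == 3 and numberGuessingRecursive(4) == 4, matching test() below.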
def factorialsProductTrailingZeros(l, r):
result = 0
last = 0
for i in range(1, r + 1):
number = i
while number % 5 == 0:
number /= 5
result += 1
if i >= l:
pr('result last')
result += last
return result
def test():
testeql(numberGuessingNaive(4),4)
testeql(numberGuessingNaive(3),3)
testeql(numberGuessingNaive(1),0)
# testeql(numberGuessingNaive(534),14)
testeql(factorialsProductTrailingZeros(4, 10), 7)
testeql(numberGuessingNaive(15), 0)
testeql(numberGuessingNaive(9), 0)
| [
"[email protected]"
] | |
1f9b109192968687be953ae31ed89405140c4775 | 4a63e96d7015e3e13d9b5204fc0261c05f600d3b | /Standard Library/tempfile/app.py | 7fc029dac7ed9b6e14fd7f28165dcf25da70c0c0 | [
"Apache-2.0"
] | permissive | shubhamnag14/Python-Documents | 0e38f58298d35b4df5b61adb361d720337148a00 | d3fee0ad90232b413f6ac1b562588fb255b79e42 | refs/heads/master | 2023-06-08T23:51:26.089840 | 2021-06-20T15:07:44 | 2021-06-20T15:07:44 | 380,832,776 | 0 | 0 | Apache-2.0 | 2021-06-27T20:33:08 | 2021-06-27T20:31:41 | null | UTF-8 | Python | false | false | 320 | py | import tempfile
def one():
file = tempfile.mkstemp()
with open(file[1], 'w+') as f:
f.write("This is a test")
f.seek(0)
print(f.read())
print(tempfile.gettempdir())
print(tempfile.gettempdirb())
print(tempfile.gettempprefix())
print(tempfile.gettempprefixb())
print(tempfile.tempdir)
| [
"[email protected]"
] | |
7c21708c07f793fe8b7ea0a740e301f39cdba0f3 | 00ce7b1e677abbfe7912a472e74b3fab92b9fc50 | /Data_processing/MNIST/checkSource.py | 0ca2d86a677aa1ee43da5cc7832283efb90ac5f8 | [] | no_license | Xharlie/MultiGen | a19b8cd76bc1933773411d69200e86bf1ba8ed74 | e3e646289249ce9418fb40f5a246310ac37e9a96 | refs/heads/master | 2021-01-22T10:56:16.432939 | 2017-12-02T03:34:41 | 2017-12-02T03:34:41 | 82,051,628 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | import h5py
from PIL import Image
import numpy as np
DIGIT=2
FORM=4
COLOR=0
TRANSFORM=1
def check():
imgArray = []
with h5py.File('../../Source/MNIST/all_info.h5', 'r') as f:
imgArray = f['data']
segm = f['segm']
digit = f['digit']
form = f['form']
color = f['color']
transform = f['transform']
index = 0
for i in range(imgArray.shape[0]):
if (digit[i][DIGIT] == 1 and form[i][FORM] == 1 and color[i][COLOR] == 1 and transform[i][TRANSFORM] == 1):
index = i
break
img = np.transpose(imgArray[index],(1,2,0))
img = (img)*255
img = Image.fromarray(img.astype(np.int8), 'RGB')
img.show()
# print segm.shape
# img2 = segm
# print img2
# img2 = Image.fromarray((img2 * 255).astype(np.int8), 'L')
# img2.show()
print digit,form,color,transform
if __name__ == '__main__':
check() | [
"[email protected]"
] | |
8f0f47ef704ca1bf13b6b054d960ad79eb855848 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_02_01/aio/operations/_operations.py | 8379386a6db3839f741ead07cbbbe9c6844a901e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 4,659 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.OperationListResult"]:
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_02_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'} # type: ignore
| [
"[email protected]"
] | |
c1ea1c2df956749c6deeb18f05376f849453d2e6 | 3c3c274f266736c97dc14608511f039e65e31694 | /chalicelib/auth.py | ad8d2d2a8ff3cc74580968dde491e1779e63a446 | [] | no_license | craymaru/chalice-todoapp-training | b2de9a7bff52ae3675a36ac44c7886a003199c7c | 5a3229f3f4d185457812777432bd99adb9b7c56a | refs/heads/master | 2023-01-11T18:03:50.712684 | 2020-11-18T04:29:34 | 2020-11-18T04:29:34 | 313,465,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | import hashlib
import hmac
import datetime
from uuid import uuid4
import jwt
from chalice import UnauthorizedError
# TODO: Figure out what we want to do with this.
# We can either move this out to env vars in config.json,
# use KMS to encrypt/decrypt this value, or store this in SSM.
# Until we figure it out I'll store it here.
_SECRET = b'\xf7\xb6k\xabP\xce\xc1\xaf\xad\x86\xcf\x84\x02\x80\xa0\xe0'
def get_jwt_token(username, password, record):
actual = hashlib.pbkdf2_hmac(
record['hash'],
password.encode(),
record['salt'].value,
record['rounds']
)
expected = record['hashed'].value
if hmac.compare_digest(actual, expected):
now = datetime.datetime.utcnow()
unique_id = str(uuid4())
payload = {
'sub': username,
'iat': now,
'nbf': now,
'jti': unique_id,
# NOTE: We can also add 'exp' if we want tokens to expire.
}
return jwt.encode(payload, _SECRET, algorithm='HS256')
raise UnauthorizedError('Invalid password')
def decode_jwt_token(token):
return jwt.decode(token, _SECRET, algorithms=['HS256'])
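# Hedged usage sketch -- everything below is illustrative and not part of chalicelib.
# It builds a password record in the shape get_jwt_token expects (values exposing .value,
# e.g. DynamoDB Binary objects) and round-trips a token.
def _example_password_record(password, rounds=100000):
    import os
    class _Binary(object):  # stand-in for a DynamoDB Binary value exposing .value
        def __init__(self, value):
            self.value = value
    salt = os.urandom(16)
    hashed = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, rounds)
    return {'hash': 'sha256', 'salt': _Binary(salt), 'rounds': rounds, 'hashed': _Binary(hashed)}
# token = get_jwt_token('alice', 's3cret', _example_password_record('s3cret'))
# decode_jwt_token(token)['sub']  # -> 'alice'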
| [
"[email protected]"
] | |
a7b55848abbb88a94997e6304eb564af957d682f | e012ac032de8bf5bf880d4917fb6329f99f47d2b | /grdient_decent.py | 13306f92fd7645e6b5595b881733dbdf2c385b6e | [] | no_license | janakiraam/ML-ToyProbelm | d1d0b8ffe55fb68cea22ea2326be3aeb23e64423 | f8775ebce8f6b464e023bda92890fc30bcf923e6 | refs/heads/main | 2023-03-15T11:06:32.252230 | 2021-03-13T17:19:08 | 2021-03-13T17:19:08 | 341,291,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import numpy as np
def gradient_decent(x,y):
m_curr=0
b_curr=0
iteration=100
n = len(x)
learning_rate=0.001
for i in range(iteration):
y_predict=m_curr*x+b_curr
md=-(2/n)*sum(x*(y-y_predict))
bd=-(2/n)*sum(y-y_predict)
m_curr=m_curr - learning_rate*md
b_curr=b_curr - learning_rate*bd
print("m {}, b {} , iteration {}".format(m_curr,b_curr,i))
x=np.array([1,2,3,4,5])
y=np.array([5,7,11,25,13])
gradient_decent(x,y) | [
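# Hedged check (illustrative only): compare against the closed-form least-squares fit.
# With only 100 iterations and learning_rate=0.001 the loop above will not yet have converged.
m_ls, b_ls = np.polyfit(x, y, 1)
print("closed-form least squares: m {}, b {}".format(m_ls, b_ls))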
"[email protected]"
] | |
4cd37584ef4a0d01cd88ff800395b7ab860f7b52 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/0530.0_Minimum_Absolute_Difference_in_BST.py | 93ad4e5a1a70ef0a7e5a3df60900e33c8cd38472 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | '''
Runtime: 99 ms, faster than 5.10% of Python3 online submissions for Minimum Absolute Difference in BST.
Memory Usage: 18.5 MB, less than 69.94% of Python3 online submissions for Minimum Absolute Difference in BST.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def getMinimumDifference(self, root: Optional[TreeNode]) -> int:
def inorder(node):
if node:
yield from inorder(node.left)
yield node.val
yield from inorder(node.right)
return min(b - a for a, b in pairwise(inorder(root)))
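# Hedged usage sketch, assuming the TreeNode class from the commented header above:
# the BST built from [4, 2, 6, 1, 3] has inorder values 1, 2, 3, 4, 6, so the answer is 1.
# root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))
# Solution().getMinimumDifference(root)  # -> 1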
| [
"[email protected]"
] | |
dd6b99605f2ad07b00b76fab12d2dfa0ec787223 | 0f949dc62b728b2cf6e0e172eb7c1cc31012244d | /script/tft_touch.py | 5190129c1503eb022011cef240009dfb42cc3187 | [
"MIT"
] | permissive | jeguzzi/mt_screen | 74c4314012ddb9471650d8b1f10c889265101f92 | f06ea6404474e8a71a4d61ec381a6e99e03e0ebb | refs/heads/master | 2020-03-19T09:39:44.114549 | 2018-06-06T09:46:05 | 2018-06-06T09:46:05 | 136,307,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | #!/usr/bin/env python
from __future__ import division
import threading
import evdev
import rospy
import wiringpi2 as wp
from evdev.ecodes import ABS_X, ABS_Y, BTN_TOUCH, EV_ABS, EV_KEY
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
KEYS = [1, 4, 5]
IN = 0
OUT = 1
class TFTouch(object):
def __init__(self):
rospy.init_node('tft')
self.continuos = rospy.get_param('~continuos', True)
rate = rospy.get_param('~rate', 10.0)
if rate > 0:
period = 1 / rate
else:
period = 0.1
self.width = rospy.get_param('tft/width', 320)
self.height = rospy.get_param('tft/height', 240)
self.dev = evdev.InputDevice('/dev/input/ts_uinput')
wp.wiringPiSetup()
for key, pin in enumerate(KEYS):
wp.pinMode(pin, IN)
self.key_pub = {pin: rospy.Publisher('tft/key_{key}'.format(key=i + 1), Bool, queue_size=1)
for i, pin in enumerate(KEYS)}
self.state = {pin: 0 for pin in KEYS}
self.touch = {'x': None, 'y': None, 'down': 0}
self.joy_pub = rospy.Publisher('tft/touch', Joy, queue_size=1)
rospy.Timer(rospy.Duration(period), self.update_keys, oneshot=False)
self.dev_thread = threading.Thread(target=self.update_touch)
self.dev_thread.daemon = True
self.dev_thread.start()
self.buttons = []
self.axes = []
def update_touch(self):
for event in self.dev.read_loop():
if event.type == EV_ABS:
if event.code == ABS_X:
self.touch['x'] = max(min(event.value, self.width), 0)
continue
if event.code == ABS_Y:
self.touch['y'] = max(min((self.height - event.value), self.height), 0)
continue
if event.type == EV_KEY and event.code == BTN_TOUCH:
self.touch['down'] = event.value
continue
def update_keys(self, event):
# 1 is up, 0 is down
state = {pin: 1 - wp.digitalRead(pin) for pin in KEYS}
if self.touch['down'] and self.touch['x'] is not None and self.touch['y'] is not None:
axes = [2 * self.touch['x'] / self.width - 1, 2 * self.touch['y'] / self.height - 1]
else:
axes = [0, 0]
buttons = [self.touch['down']] + [state[pin] for pin in KEYS]
if self.continuos or buttons != self.buttons or axes != self.axes:
msg = Joy(buttons=buttons, axes=axes)
msg.header.stamp = rospy.Time.now()
# msg.header.frame_id = 'tft'
self.joy_pub.publish(msg)
self.buttons = buttons
self.axes = axes
for pin, value in state.items():
if value != self.state.get(pin):
self.key_pub[pin].publish(value)
self.state = state
if __name__ == '__main__':
t = TFTouch()
rospy.spin()
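# Hedged worked example of the normalization in update_keys (illustrative only):
# with width=320 and height=240, a touch at pixel (320, 0) maps to
# axes = [2*320/320 - 1, 2*0/240 - 1] = [1.0, -1.0], and the screen centre (160, 120)
# maps to [0.0, 0.0]; buttons are [touch_down, key_1, key_2, key_3].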
| [
"[email protected]"
] | |
4de44a5b5d1cf08e40c04309c6c96b326fff5031 | c3179dc6c11770fe877d9b08bebd28448ee66ba8 | /mtdnn/tasks/utils.py | 0e80a8d2a9db50028038b3cf4486bcfa8fe6d561 | [
"MIT"
] | permissive | microsoft/MT-DNN | 2a0f102916a1b092f25b4999834177bd38319c53 | e5c3e07f3a8e55067433714ce261a6d28ba73d22 | refs/heads/master | 2023-06-29T23:57:42.108328 | 2020-07-02T02:22:06 | 2020-07-02T02:22:06 | 215,127,881 | 151 | 28 | MIT | 2023-06-12T21:28:37 | 2019-10-14T19:25:46 | Python | UTF-8 | Python | false | false | 14,964 | py | # coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import os
import pdb
from random import shuffle
from sys import path
from mtdnn.common.metrics import calc_metrics
from mtdnn.common.types import DataFormat
def process_data_and_dump_rows(
rows: list,
out_path: str,
data_format: DataFormat,
write_mode: str = "w",
dump_rows: bool = False,
) -> None:
"""
Output files should have following format
:param rows: data
:param out_path: output file path
:return: processed_rows: List of string rows
"""
processed_rows = []
for row in rows:
data = ""
if data_format in [DataFormat.PremiseOnly, DataFormat.Sequence]:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
pdb.set_trace()
data = f"{row['uid']}\t{row['label']}\t{row['premise']}\n"
elif data_format == DataFormat.PremiseAndOneHypothesis:
for col in ["uid", "label", "premise", "hypothesis"]:
if "\t" in str(row[col]):
pdb.set_trace()
data = (
f"{row['uid']}\t{row['label']}\t{row['premise']}\t{row['hypothesis']}\n"
)
elif data_format == DataFormat.PremiseAndMultiHypothesis:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
pdb.set_trace()
hypothesis = row["hypothesis"]
for one_hypo in hypothesis:
if "\t" in str(one_hypo):
pdb.set_trace()
hypothesis = "\t".join(hypothesis)
data = f"{row['uid']}\t{row['ruid']}\t{row['label']}\t{row['premise']}\t{hypothesis}\n"
else:
raise ValueError(data_format)
processed_rows.append(data)
# Save data if dump_rows is true
if dump_rows:
with open(out_path, mode=write_mode, encoding="utf-8") as out_f:
out_f.writelines(processed_rows)
return processed_rows
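# Hedged example (illustrative only): for DataFormat.PremiseAndOneHypothesis a row such as
# {"uid": "1", "label": "entailment", "premise": "A man eats.", "hypothesis": "Someone eats."}
# is serialized as the tab-separated line "1\tentailment\tA man eats.\tSomeone eats.\n".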
def load_scitail(file_path, kwargs: dict = {}):
""" Loading scitail """
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
blocks = line.strip().split("\t")
assert len(blocks) > 2
if blocks[0] == "-":
continue
sample = {
"uid": str(cnt),
"premise": blocks[0],
"hypothesis": blocks[1],
"label": blocks[2],
}
rows.append(sample)
cnt += 1
return rows
def load_snli(file_path, kwargs: dict = {}):
""" Load SNLI """
header = kwargs.get("header", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 10
if blocks[-1] == "-":
continue
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[7],
"hypothesis": blocks[8],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_mnli(file_path, kwargs: dict = {}):
""" Load MNLI """
header = kwargs.get("header", True)
multi_snli = kwargs.get("multi_snli", False)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 9
if blocks[-1] == "-":
continue
lab = "contradiction"
if is_train:
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[8],
"hypothesis": blocks[9],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_mrpc(file_path, kwargs: dict = {}):
""" Load MRPC """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 4
lab = 0
if is_train:
lab = int(blocks[0])
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_qnli(file_path, kwargs: dict = {}):
""" Load QNLI for classification"""
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 2
lab = "not_entailment"
if is_train:
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[1],
"hypothesis": blocks[2],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_qqp(file_path, kwargs: dict = {}):
""" Load QQP """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
skipped = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 6:
skipped += 1
continue
if not is_train:
assert len(blocks) == 3
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": int(blocks[0]),
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_rte(file_path, kwargs: dict = {}):
""" Load RTE """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 4:
continue
if not is_train:
assert len(blocks) == 3
lab = "not_entailment"
if is_train:
lab = blocks[-1]
sample = {
"uid": int(blocks[0]),
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": int(blocks[0]),
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_wnli(file_path, kwargs: dict = {}):
""" Load WNLI """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 4:
continue
if not is_train:
assert len(blocks) == 3
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_sst(file_path, kwargs: dict = {}):
""" Load SST """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 2:
continue
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {"uid": cnt, "premise": blocks[0], "label": lab}
else:
sample = {"uid": int(blocks[0]), "premise": blocks[1], "label": lab}
cnt += 1
rows.append(sample)
return rows
def load_cola(file_path, kwargs: dict = {}):
""" Load COLA """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 2:
continue
lab = 0
if is_train:
lab = int(blocks[1])
sample = {"uid": cnt, "premise": blocks[-1], "label": lab}
else:
sample = {"uid": cnt, "premise": blocks[-1], "label": lab}
rows.append(sample)
cnt += 1
return rows
def load_stsb(file_path, kwargs: dict = {}):
""" Load STSB """
header = kwargs.get("header", True)
is_train = kwargs.get("is_train", True)
rows = []
cnt = 0
with open(file_path, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 8
score = "0.0"
if is_train:
score = blocks[-1]
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": score,
}
else:
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": score,
}
rows.append(sample)
cnt += 1
return rows
def load_conll_ner(file_path, kwargs: dict = {}):
""" Load NER """
rows = []
cnt = 0
sentence = []
label = []
with open(file_path, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[-1])
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
return rows
def load_conll_pos(file_path, kwargs: dict = {}):
""" Load POS """
rows = []
cnt = 0
sentence = []
label = []
with open(file_path, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[1])
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
return rows
def load_conll_chunk(file_path, kwargs: dict = {}):
""" Load CHUNK """
rows = []
cnt = 0
sentence = []
label = []
with open(file_path, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[2])
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
return rows
def submit(path, data, label_dict=None):
header = "index\tprediction"
with open(path, "w") as writer:
predictions, uids = data["predictions"], data["uids"]
writer.write("{}\n".format(header))
assert len(predictions) == len(uids)
# sort label
paired = [(int(uid), predictions[idx]) for idx, uid in enumerate(uids)]
paired = sorted(paired, key=lambda item: item[0])
for uid, pred in paired:
if label_dict is None:
writer.write("{}\t{}\n".format(uid, pred))
else:
assert type(pred) is int
writer.write("{}\t{}\n".format(uid, label_dict[pred]))
| [
"[email protected]"
] | |
e9a4edfe1026ffae7f0e4077a0753cd8224ef2a4 | d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b | /espresso/jobmanager/jobmanager/temp/bin/packjobdir.py | 697971511dcfa49909f268637571980c629e1286 | [] | no_license | danse-inelastic/AbInitio | 6f1dcdd26a8163fa3026883fb3c40f63d1105b0c | 401e8d5fa16b9d5ce42852b002bc2e4274afab84 | refs/heads/master | 2021-01-10T19:16:35.770411 | 2011-04-12T11:04:52 | 2011-04-12T11:04:52 | 34,972,670 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def main():
from vnf.applications.PackJobDir import PackJobDir as base
class App(base):
def _getPrivateDepositoryLocations(self):
return ['../config']
app = App()
return app.run()
# main
if __name__ == '__main__':
# invoke the application shell
main()
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
] | |
ff30e8932a6292b69bb900155874ffcfa1e06431 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/2930.py | 97be09473c78d8ee4bccfb81bd58eb99d9cd14ca | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | from __future__ import division
T = input()
for i in range(T):
C, F, X = [float(x) for x in raw_input().split()]
cookiesRate = 2
if C >= X : print "Case #%d: %.7f" % (i+1, X/cookiesRate)
else:
timeElapsed = 0
while(C/cookiesRate + X/(cookiesRate+F) < X/cookiesRate):
timeElapsed += C/cookiesRate
cookiesRate += F
timeElapsed += X/cookiesRate
print "Case #%d: %.7f" % (i+1, timeElapsed) | [
"[email protected]"
] | |
c633ac2470e05a99614be9f9f82a751daa8489db | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/c4ccaa8b4474471f993db5910720bf59.py | 53dba6ce82f915abfd9d8828c4b03607686fbbc1 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,305 | py | import unicodedata
STANDARD_RESPONSES = {
'question': 'Sure.',
'exclamation': 'Woah, chill out!',
'empty': 'Fine. Be that way!',
'other': 'Whatever.'
}
def hey(*statements):
for statement in statements:
if type(statement) != str:
try:
statement = str(statement)
except:
statement = unicodedata.normalize('NFKD', statement).encode('ascii','ignore')
if is_empty(statement):
return STANDARD_RESPONSES['empty']
punctuation = statement[len(statement) - 1]
if is_exclamation(statement, punctuation):
return STANDARD_RESPONSES['exclamation']
elif is_question(statement, punctuation):
return STANDARD_RESPONSES['question']
else:
return STANDARD_RESPONSES['other']
def is_empty(statement):
if len(statement) == 0 or statement.isspace():
return True
else:
return False
def is_question(statement, punctuation):
if punctuation == '?':
return True
return False
def is_exclamation(statement, punctuation):
if punctuation == '!':
if statement.isupper():
return True
else:
return False
elif statement.isupper():
return True
return False
| [
"[email protected]"
] | |
141edf402032a4bbe9c3349258944e9dcfa2c803 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /chrome/browser/android/digital_asset_links/DEPS | 7023254e344e39b9b94c5db81d7a70a7df505240 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 296 | # It is likely that this code will eventually be shared across platforms, so
# excluding dependencies that would make this being a component impossible.
include_rules = [
"-content",
"-chrome",
"+base",
"+content/public/test",
"+chrome/browser/android/digital_asset_links",
"+net",
]
| [
"[email protected]"
] | ||
9836c4db6976992908c3e2fdd5c42aee5b2c2e44 | 66d352e30036b0917e22b2ccde6e0bbc05f9758c | /TelluricSpectra/TellRemoval_interptest.py | 54d1373f0ce141d99b8b9bb15b17c2674b949ca8 | [] | no_license | jason-neal/Phd-codes | 8354563b1d2b0fcce39d72adbfd82b65557399b4 | c947ffa56228746e2e5cdb3ab99e174f6c8e9776 | refs/heads/master | 2023-08-30T23:11:55.394560 | 2022-04-24T09:25:28 | 2022-04-24T09:25:28 | 42,106,284 | 0 | 1 | null | 2023-08-16T02:22:59 | 2015-09-08T10:40:26 | Jupyter Notebook | UTF-8 | Python | false | false | 5,575 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
""" Codes for Telluric contamination removal
Interpolates telluric spectra to the observed spectra.
Divides spectra telluric spectra
can plot result
"""
import argparse
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from astropy.io import fits
from scipy import interpolate
from scipy.interpolate import interp1d
import GaussianFitting as gf
import Obtain_Telluric as obt
def divide_spectra(spec_a, spec_b):
""" Assumes that the spectra have been interpolated to same wavelength step"""
""" Divide two spectra"""
assert(len(spec_a) == len(spec_b)), "Not the same length"
divide = spec_a / spec_b
return divide
def match_wl(wl, spec, ref_wl):
"""Interpolate Wavelengths of spectra to common WL
Most likely convert telluric to observed spectra wl after wl mapping performed"""
newspec1 = np.interp(ref_wl, wl, spec) # 1-d piecewise linear interpolation
test_plot_interpolation(wl, spec,ref_wl,newspec1)
print("newspec1")
# cubic spline with scipy
#linear_interp = interp1d(wl, spec)
#linear_interp = interp1d(wl, spec, kind='cubic')
# Timeing interpolation
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='linear')(ref_wl)
print("linear intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='slinear')(ref_wl)
print("slinear intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='quadratic')(ref_wl)
print("quadratic intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
print("cubic intergration time =", time.time()-starttime)
#newspec2 = interp1d(wl, spec, kind='cubic')(ref_wl)
print("newspec2")
#ewspec2 = sp.interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
return newspec1, newspec2 # test interpolations
def plot_spectra(wl, spec, colspec="k.-", label=None, title="Spectrum"):
""" Do I need to replicate plotting code?
Same axis
"""
plt.plot(wl, spec, colspec, label=label)
plt.title(title)
plt.legend()
plt.show(block=False)
return None
def test_plot_interpolation(x1, y1, x2, y2, methodname=None):
""" Plotting code """
plt.plot(x1, y1, label="original values")
plt.plot(x2, y2, label="new points")
plt.title("testing Interpolation: ", methodname)
plt.legend()
plt.xlabel("Wavelength (nm)")
plt.ylabel("Norm Intensity")
plt.show()
return None
def telluric_correct(wl_obs, spec_obs, wl_tell, spec_tell):
"""Code to contain other functions in this file
1. Interpolate spectra to same wavelengths with match_WLs()
2. Divide by Telluric
3. ...
"""
print("Before match_wl")
interp1, interp2 = match_wl(wl_tell, spec_tell, wl_obs)
print("After match_wl")
# could just do interp here without match_wl function
# test outputs
#print("test1")
#test_plot_interpolation(wl_tell, spec_tell, wl_obs, interp1)
#print("test2")
# test_plot_interpolation(wl_tell, spec_tell, wl_obs, interp2)
# division
print("Before divide_spectra")
corrected_spec = divide_spectra(spec_obs, interp2)
print("After divide_spectra")
#
# other corrections?
return corrected_spec
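# Hedged usage sketch (illustrative only; see also the synthetic test code at the bottom
# of this file): interpolate a coarse telluric model onto the observed wavelength grid
# and divide it out.
# wl_obs = np.linspace(2110.0, 2120.0, 50)
# corrected = telluric_correct(wl_obs, np.ones_like(wl_obs), wl_obs, 0.9 * np.ones_like(wl_obs))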
def _parser():
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Telluric Removal')
parser.add_argument('fname', help='Input fits file')
parser.add_argument('-o', '--output', default=False,
help='Ouput Filename',)
args = parser.parse_args()
return args
def main(fname, output=False):
homedir = os.getcwd()
data = fits.getdata(fname)
wl = data["Wavelength"]
I = data["Extracted_DRACS"]
hdr = fits.getheader(fname)
datetime = hdr["DATE-OBS"]
obsdate, obstime = datetime.split("T")
obstime, __ = obstime.split(".")
tellpath = "/home/jneal/Phd/data/Tapas/"
tellname = obt.get_telluric_name(tellpath, obsdate, obstime)
print("tell name", tellname)
tell_data = obt.load_telluric(tellpath, tellname[0])
wl_lower = np.min(wl/1.0001)
wl_upper = np.max(wl*1.0001)
tell_data = gf.slice_spectra(tell_data[0], tell_data[1], wl_lower, wl_upper)
#tell_data =
print("After slice spectra")
plt.figure()
plt.plot(wl, I, label="Spectra")
plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
plt.show()
# Loaded in the data
# Now perform the telluric removal
I_corr = telluric_correct(wl, I, tell_data[0], tell_data[1])
print("After telluric_correct")
plt.figure()
plt.plot(wl, I_corr, label="Corrected Spectra")
plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
plt.show()
if __name__ == "__main__":
args = vars(_parser())
fname = args.pop('fname')
opts = {k: args[k] for k in args}
main(fname, **opts)
""" Some test code for testing functions """
sze = 20
x2 = range(sze)
y2 = np.random.randn(len(x2)) + np.ones_like(x2)
y2 = 0.5 * np.ones_like(x2)
x1 = np.linspace(1, sze-1.5, 9)
y1 = np.random.randn(len(x1)) + np.ones_like(x1)
y1 = np.ones_like(x1)
print(x1)
print(x2)
#print(y1)
#print(y2)
y1_cor = telluric_correct(x1, y1, x2, y2)
print(x1)
print(y1)
print(y1_cor)
| [
"[email protected]"
] | |
9c1b67405acfc447e0bcde61a0b406ab29189c33 | f4713830c8519daca9d75ec692a6937ee03c74d4 | /Problems/Algorithms/953. Verifying an Alien Dictionary/alien_dictionary.py | af8a014ae986a3a0467e9a3207355cbfdb4b4240 | [
"MIT"
] | permissive | xuedong/leet-code | a0dd38cb884292de9d947718bb00160eff2b0f00 | 285d49cd7061ec43368d63b7c7c56763be520570 | refs/heads/master | 2023-09-03T02:38:55.932182 | 2023-09-02T18:35:42 | 2023-09-02T18:35:42 | 189,745,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python3
from typing import List
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
map = {ch: idx for idx, ch in enumerate(order)}
words = [[map[ch] for ch in word] for word in words]
return all(word1 <= word2 for word1, word2 in zip(words[:-1], words[1:]))
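# Example usage (a quick illustration; the word lists below are not part of the original file):
#   Solution().isAlienSorted(["hello", "leetcode"], "hlabcdefgijkmnopqrstuvwxyz")    # -> True
#   Solution().isAlienSorted(["word", "world", "row"], "worldabcefghijkmnpqstuvxyz")  # -> False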
| [
"[email protected]"
] | |
3791527cea4f9b19510cd2511f27d307b569de22 | 4d2de834ecea6ef444b1c45afb5a41e717900858 | /app/app_todo/__init__.py | 33a8204e60a9ea5ebfaf02b5c996d4aafaf808af | [] | no_license | florije1988/flask_regular | 19da04c59fbf600274d206750ccb8cf355db2d24 | 1219e4efbad76202d6dca7e4b2148344ea9edf8c | refs/heads/master | 2020-12-24T13:21:29.840919 | 2014-12-16T00:58:15 | 2014-12-16T00:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # -*- coding: utf-8 -*-
__author__ = 'florije'
from flask import Blueprint
from app.custom_api import Api
app_todo = Blueprint('app_task', __name__)
api_todo = Api(app_todo, catch_all_404s=True)
from . import views
api_todo.add_resource(views.HelloHandler, '/hello') | [
"[email protected]"
] | |
3342dbd03130abc2b867b2e3e093a75c7f00aafa | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Python3/Tkinter/tkinter020.py | 23c5a22cc77a5d384d51239477848c13a696f07a | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/python3
from tkinter import *
root = Tk()
myContainer1 = Frame(root)
myContainer1.pack()
root.mainloop()
| [
"[email protected]"
] | |
0ba9aca97b1c1f59da1afb823752e4f46a680b96 | feae88b4a8bc0aba388dcc2eeb7debb49d736809 | /apps/second_app/urls.py | fb99d9914ffc2c2fedcdee10fd14c61afe4e550b | [] | no_license | john-gore/belt3_retry | ec8a5582382fc00f0bcb3cf973fe9cd073ed571c | 03aa6d7ff9988615a96d2c882282107d389b1c52 | refs/heads/master | 2021-07-21T11:11:42.972344 | 2017-10-29T21:34:09 | 2017-10-29T21:34:09 | 108,772,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.conf.urls import url
from django.contrib import admin
from . import views
from ..first_app.models import User # This line is new!
urlpatterns = [
url(r'^$', views.index, name='index') # This line has changed!
] | [
"[email protected]"
] | |
fbc05970539a311c1532e03d1461d962abe1cae2 | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_PointArray.py | 302c83b9f7d628767effb2ae4bd898435e6dc65f | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,546 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/PointArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
class PointArray(genpy.Message):
_md5sum = "6d79425254a86e33112d6737776efb2b"
_type = "vector_map_msgs/PointArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
Point[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/Point
# Ver 1.00
int32 pid
float64 b
float64 l
float64 h
float64 bx
float64 ly
int32 ref
int32 mcode1
int32 mcode2
int32 mcode3
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','vector_map_msgs/Point[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
else:
self.header = std_msgs.msg.Header()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_i5d4i = None
def _get_struct_i5d4i():
global _struct_i5d4i
if _struct_i5d4i is None:
_struct_i5d4i = struct.Struct("<i5d4i")
return _struct_i5d4i
| [
"[email protected]"
] | |
8735f5b0e9167684495efe5852cebc7defa664f7 | 930309163b930559929323647b8d82238724f392 | /abc155_c.py | 6b0f7c6960bceb99ef3c1e6274c2f06a7b5baa8f | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import collections
N=int(input())
S=[input() for i in range(N)]
S=collections.Counter(S)
max_v = max(S.values())
for k,v in sorted(list(filter(lambda x:x[1]==max_v, S.items()))):
print(k)
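# Example (illustrative): for N=4 with votes ["b", "a", "b", "c"], only "b" is printed,
# since it is the most frequent string; ties are printed in sorted (lexicographic) order.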
| [
"[email protected]"
] | |
2326a5cd67d0e36dfc987657a3b77f64b1108019 | 5de646fb3ecf10ecb45e05018a23b6345fb9ca53 | /codejam/2020 Qualification Round/d.py | e358bdc477498577b9dcea874b2bbacb4f08905f | [] | no_license | PPinto22/LeetCode | 5590d6ca87efcd29f9acd2eaed1bcf6805135e29 | 494a35542b61357c98c621202274d774e650a27c | refs/heads/master | 2022-04-29T20:37:31.085120 | 2022-04-02T12:02:30 | 2022-04-02T12:02:30 | 201,478,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | from typing import Union, List, Tuple, Optional
def solve(B):
def set(index, value):
nonlocal control_equal, control_complement, known
# Fix to prevent unpaired bits right before a fluctuation
if (not control_complement or not control_equal) \
and (query % 10 == 0) \
and (known % 2 == 0):
return
solution[index] = value
known += 1
pair = get_pair(index)
if not control_equal and value == pair[1]:
control_equal = pair
elif not control_complement \
and pair[1] is not None \
and value != pair[1]:
control_complement = pair
def get_pair(index):
pair_index = B - 1 - index
return [pair_index, solution[pair_index]]
def determine_fluctuation():
nonlocal control_complement, control_equal
possibilities = ['complement', 'reverse', 'both', 'none']
if control_equal:
index, old = control_equal
new = ask(index)
if old == new:
possibilities = [p for p in possibilities if p in {'reverse', 'none'}]
else:
possibilities = [p for p in possibilities if p in {'complement', 'both'}]
control_equal = index, new
if control_complement:
index, old = control_complement
new = ask(index)
if old == new:
possibilities = [p for p in possibilities if p in {'both', 'none'}]
else:
possibilities = [p for p in possibilities if p in {'complement', 'reverse'}]
control_complement = index, new
return possibilities[0]
def apply_fluctuation(fluctuation):
def complement():
for i in range(B):
if solution[i] is not None:
solution[i] = not solution[i]
if fluctuation == 'complement':
complement()
elif fluctuation == 'reverse':
solution.reverse()
elif fluctuation == 'both':
complement()
solution.reverse()
def ask(i):
nonlocal query
query += 1
print(i + 1, flush=True)
response = input()
return True if response == '1' else False
def next_index():
return (known // 2) if (known % 2 == 0) else (B - (known // 2) - 1)
solution: List[Union[bool, None]] = [None] * B
control_equal: Optional[Tuple[int, bool]] = None
control_complement: Optional[Tuple[int, bool]] = None
query = 0
known = 0
while known < B and query < 150:
if query > 0 and query % 10 == 0:
fluctuation = determine_fluctuation()
apply_fluctuation(fluctuation)
else:
index = next_index()
set(index, ask(index))
return ''.join(map(lambda x: '1' if x else '0', solution))
if __name__ == '__main__':
T, B = map(int, input().split())
for Ti in range(1, T + 1):
solution = solve(B)
print(solution, flush=True)
if input() == 'N':
break
| [
"[email protected]"
] | |
e79db74e458b1f23bf9c7d355f33c7457e7e49b8 | 45272da6d64161a586b1dd41df63b8f701f38e39 | /Easy Problems/1-10/1easy.py | 075277c849e0a410bcde57f4d5bf459e7c1e8fad | [] | no_license | Lucas-Guimaraes/Reddit-Daily-Programmer | 559f813d2ee1a06e80a2b260bcb43718ae50b8bf | 45d554d0e0f8bc67e2111bede3a45f77f5512d7b | refs/heads/main | 2023-07-31T18:36:48.774791 | 2021-09-13T04:08:09 | 2021-09-13T04:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | #https://www.reddit.com/r/dailyprogrammer/comments/pih8x/easy_challenge_1/
user_name = raw_input("Put in your name: ")
user_age = raw_input("What's your age?: ")
user_screenname = raw_input("How about a username?: ")
print("Your name is " + user_name + " your are " + user_age + " years old, and your username is " + user_screenname)
raw_input()
| [
"[email protected]"
] | |
d5c5909ea6644335136f2d82bcda8a30fa14ccab | 48477a15ad96505def8097a6c098826b1e5cfe1a | /2_basic_algorithms/2_sorting_algorithms/14_pair_sum.py | 9e1422278c00aead20f8116beaac4b3230077a6d | [] | no_license | 450703035/Data-Structures-Algorithms | 02cd5bbb92ce25019fce4955af38b0317b4f4cac | dde33560fcb3e3ff41cf8bd37a454f8c13b15138 | refs/heads/master | 2021-05-22T02:25:03.554870 | 2020-06-27T14:23:24 | 2020-06-27T14:23:24 | 252,927,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # Pair Sum
'''
Problem Statement
Given an input array and a target value (integer), find two values
in the array whose sum is equal to the target value.
Solve the problem without using extra space.
You can assume the array has unique values and will never have
more than one solution.
'''
def pair_sum(arr, target):
"""
:param: arr - input array
:param: target - target value
TODO: complete this method to find two numbers such that their sum is equal to the target
Return the two numbers in the form of a sorted list
"""
# Sort the list
arr.sort()
# Initialize two pointers - one from the start of the array and
    # the other from the end.
front_index = 0
back_index = len(arr) - 1
# Shift the pointers
while front_index < back_index:
front = arr[front_index]
back = arr[back_index]
if front + back == target:
return [front, back]
# Sum < target --> shift front pointer forwards
elif front + back < target:
front_index += 1
# Sum > target --> Shift back pointer backwards
else:
back_index -= 1
return [None, None]
# Test of the pair_sum function.
def test_function(test_case):
input_list = test_case[0]
    target = test_case[1]
solution = test_case[2]
output = pair_sum(input_list, target)
if output == solution:
print("Pass")
else:
print("False")
input_list = [2, 7, 11, 15]
target = 9
solution = [2, 7]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [0, 8, 5, 7, 9]
target = 9
solution = [0, 9]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [110, 9, 89]
target = 9
solution = [None, None]
test_case = [input_list, target, solution]
test_function(test_case)
| [
"[email protected]"
] | |
526abb44076323b13492031101bc312d813868d2 | 40796d49a6d50237900ac1a1a20648b546613d18 | /python/applications/mobdat/common/graph/LayoutNodes.py | 333b5941c57af701d1ba71064bdf91f907c25351 | [] | no_license | Mondego/spacetime-apps | c32abca98134d80f5bff965c8d74550c8109821d | c2d3a714cc2819f4a72d2d0b1b8c129d69c4de7c | refs/heads/master | 2021-01-23T03:43:08.197768 | 2019-07-27T22:08:58 | 2019-07-27T22:08:58 | 86,112,423 | 3 | 3 | null | 2019-07-27T22:08:59 | 2017-03-24T21:34:10 | Python | UTF-8 | Python | false | false | 10,237 | py | #!/usr/bin/env python
"""
Copyright (c) 2014, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file LayoutNodes.py
@author Mic Bowman
@date 2013-12-03
This file defines routines used to build features of a mobdat traffic
network such as building a grid of roads.
"""
import os, sys
import logging
# we need to import python modules from the $SUMO_HOME/tools directory
sys.path.append(os.path.join(os.environ.get("OPENSIM","/share/opensim"),"lib","python"))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "lib")))
import Node, LayoutDecoration
logger = logging.getLogger(__name__)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class IntersectionType(Node.Node) :
"""
The IntersectionType class is used to specify parameters for rendering
intersections in Sumo and OpenSim.
"""
# -----------------------------------------------------------------
def __init__(self, name, itype, render) :
"""
Args:
name -- string
itype -- string, indicates the stop light type for the intersection
render -- boolean, flag to indicate that opensim should render the object
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.IntersectionTypeDecoration(name, itype, render))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class Intersection(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, itype, x, y) :
"""
Args:
name -- string
itype -- object of type Layout.IntersectionType
x, y -- integer coordinates
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.CoordDecoration(x, y))
self.AddDecoration(LayoutDecoration.EdgeMapDecoration())
itype.AddMember(self)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
##class EndPoint(Node.Node) :
class EndPoint(Intersection) :
"""
EndPoint
This graph node class (a subset of intersections) is the destination
for a trip.
Members: None
Decorations:
EndPointDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, itype, x, y) :
"""
Args:
name -- string
itype -- object of type Layout.IntersectionType
x, y -- integer coordinates
"""
Intersection.__init__(self, name, itype, x, y)
self.AddDecoration(LayoutDecoration.EndPointDecoration())
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class LocationCapsule(Node.Node) :
"""
LocationCapsule
This graph node class manages a collection of EndPoint nodes.
Members: EndPoints, typically one endpoint for a residential
location and multiple endpoints for a business location
Decorations:
CapsuleDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name) :
"""
Args:
name -- string
itype -- object of type Layout.IntersectionType
x, y -- integer coordinates
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.CapsuleDecoration())
# -----------------------------------------------------------------
def AddEndPointToCapsule(self, endpoint) :
"""
Args:
endpoint -- object of type LayoutNodes.EndPoint
"""
self.AddMember(endpoint)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocation(Node.Node) :
"""
BusinessLocation
This graph node class manages a business neighborhood consisting of
a collection of LocationCapsule objects
    Members: Typically one LocationCapsule node that contains multiple
EndPoint nodes
MemberOf:
BusinessLocationProfile
Decorations:
BusinessLocationDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, profile) :
"""
Args:
name -- string
profile -- object of type BusinessLocationProfile
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.BusinessLocationDecoration())
profile.AddMember(self)
# -----------------------------------------------------------------
def AddCapsuleToLocation(self, capsule) :
"""
Args:
capsule -- object of type LayoutNodes.LocationCapsule
"""
self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocation(Node.Node) :
"""
ResidentialLocation
This graph node class manages a residential neighborhood consisting of
a collection of LocationCapsule objects
Members: Typically several LocationCapsule nodes that each contain
a single EndPoint node
MemberOf:
ResidentialLocationProfile
Decorations:
ResidentialLocationDecoration
Edges: None
"""
# -----------------------------------------------------------------
def __init__(self, name, profile) :
"""
Args:
name -- string
profile -- object of type ResidentialLocationProfile
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.ResidentialLocationDecoration())
profile.AddMember(self)
# -----------------------------------------------------------------
def AddCapsuleToLocation(self, capsule) :
"""
Args:
capsule -- object of type LayoutNodes.LocationCapsule
"""
self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocationProfile(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, employees, customers, types) :
"""
Args:
name -- string
employees -- integer, max number of employees per node
customers -- integer, max number of customers per node
types -- dict mapping Business.BusinessTypes to count
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.BusinessLocationProfileDecoration(employees, customers, types))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocationProfile(Node.Node) :
# -----------------------------------------------------------------
def __init__(self, name, residents) :
"""
Args:
residents -- integer, max number of residents per node
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.ResidentialLocationProfileDecoration(residents))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class RoadType(Node.Node) :
"""
The RoadType class is used to specify parameters for rendering roads
in Sumo and OpenSim.
"""
# -----------------------------------------------------------------
def __init__(self, name, lanes, pri, speed, wid, sig, render, center) :
"""
Args:
name -- string
lanes -- integer, number of lanes in the road
pri -- integer, priority for stop lights
speed -- float, maximum speed allowed on the road
            wid -- float, width of the road
            sig -- string, signature
render -- boolean, flag to indicate whether opensim should render
center -- boolean, flag to indicate the coordinate origin
"""
Node.Node.__init__(self, name = name)
self.AddDecoration(LayoutDecoration.RoadTypeDecoration(name, lanes, pri, speed, wid, sig, render, center))
| [
"[email protected]"
] | |
26520cf0e4d572626cca7f3ae58470069e37fd63 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLES2/NV/read_buffer.py | 638349916933fad25c3ba754755ffda4f1e717dc | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 617 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_read_buffer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_read_buffer',error_checker=_errors._error_checker)
GL_READ_BUFFER_NV=_C('GL_READ_BUFFER_NV',0x0C02)
@_f
@_p.types(None,_cs.GLenum)
def glReadBufferNV(mode):pass
| [
"[email protected]"
] | |
410559c8f26e95c96374a7fea4724d3d00169ba7 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/number-of-ways-to-earn-points.py | 6707c76b184e8c02c07e41ef08fcbd9b81e9220e | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 1,069 | py | # Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution(object):
def waysToReachTarget(self, target, types):
"""
:type target: int
:type types: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(target+1)
dp[0] = 1
for c, m in types:
for i in reversed(xrange(1, target+1)):
for j in xrange(1, min(i//m, c)+1):
dp[i] = (dp[i]+dp[i-j*m])%MOD
return dp[-1]
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution2(object):
def waysToReachTarget(self, target, types):
"""
:type target: int
:type types: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(target+1)
dp[0] = 1
for c, m in types:
new_dp = [0]*(target+1)
for i in xrange(target+1):
for j in xrange(min((target-i)//m, c)+1):
new_dp[i+j*m] = (new_dp[i+j*m]+dp[i])%MOD
dp = new_dp
return dp[-1]
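# Example usage (a small illustrative check, not part of the original file):
#   Solution().waysToReachTarget(6, [[6, 1], [3, 2], [2, 3]])   # -> 7
#   Solution2().waysToReachTarget(6, [[6, 1], [3, 2], [2, 3]])  # -> 7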
| [
"[email protected]"
] | |
37bf81f3ad11ff153ef7e0c65f8e73638bd8e747 | 76ae6d1194c4440b86eac56e1ed2d42f745e612c | /mcds_dcl2isa-pre-v1.py | c673f914ebc0520107f6229d628e42b73a175689 | [] | no_license | rheiland/mcds2isa | 76a551df09233bd976268c44cf0fa7968f87c075 | c0b1245fafd133701ff41fe12153543b73cb94e6 | refs/heads/master | 2021-07-21T00:11:43.103167 | 2019-08-27T17:23:19 | 2019-08-27T17:23:19 | 143,934,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,636 | py | #
# mcds_dcl2isa.py - using a MultiCellDS digital cell line XML file, generate associated ISA-Tab files
#
# Input:
# a MultiCellDS digital cell line file <DCL-root-filename>.xml
# Output:
# 3 ISA files:
# i_<DCL-root-filename>.txt
# s_<DCL-root-filename>.txt
# a_<DCL-root-filename>.txt
#
# Author: Randy Heiland
# Date:
# v0.1 - May 2018
# v0.2 - Oct 2018 : add more tab sep_char in various rows
#
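# Example invocation (assuming the digital cell line XML file sits in the working directory):
#   python mcds_dcl2isa-pre-v1.py MCDS_L_0000000052.xml
# which writes i_MCDS_L_0000000052.txt, s_MCDS_L_0000000052.txt and a_MCDS_L_0000000052.txt
#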
import os
import sys
import re
import xml.etree.ElementTree as ET
from pathlib import Path # Python 3?
if (len(sys.argv) < 2):
print("Usage: " + sys.argv[0] + " <MultiCellDS Digital Cell Line XML file>")
sys.exit(0)
else:
xml_file = sys.argv[1]
# for testing, just set it
#xml_file = "MCDS_L_0000000052.xml"
header = '\
ONTOLOGY SOURCE REFERENCE\n\
Term Source Name "NCIT" "UO" "NCBITAXON" "EDDA"\n\
Term Source File "https://ncit.nci.nih.gov/ncitbrowser/" "https://bioportal.bioontology.org/ontologies/UO" "http://purl.obolibrary.org/obo/NCBITaxon_1" "http://bioportal.bioontology.org/ontologies/EDDA"\n\
Term Source Version "17.02d" "" "" "2.0"\n\
Term Source Description "NCI Thesarus" "" "" "Evidence in Documents, Discovery, and Analytics (EDDA)"\
'
if not Path(xml_file).is_file():
    print(xml_file + ' does not exist!')
sys.exit(-1)
if (os.sep in xml_file):
xml_base_filename = xml_file[xml_file.rfind(os.sep)+1:]
else:
xml_base_filename = xml_file
investigation_filename = "i_" + xml_base_filename[:-4] + ".txt"
study_filename = "s_" + xml_base_filename[:-4] + ".txt"
assay_filename = "a_" + xml_base_filename[:-4] + ".txt"
#=======================================================================
fp = open(investigation_filename, 'w')
tree = ET.parse(xml_file) # TODO: relative path using env var?
xml_root = tree.getroot()
sep_char = '\t' # tab
fp.write(header + '\n')
fp.write('INVESTIGATION\n')
#print(xml_root.find(".//MultiCellDB").find(".//ID").text)
i_identifier = '"' + xml_root.find(".//metadata").find(".//ID").text + '"'
#i_title = '"' + xml_root.find(".//metadata").find(".//name").text + '"'
i_title = '"' + xml_root.find(".//metadata").find(".//name").text + ' Digital Cell Line"'
i_desc = '"' + xml_root.find(".//metadata").find(".//description").text + '"'
i_desc = re.sub('\t','',i_desc)
i_desc = re.sub('\n','',i_desc)
fp.write('Investigation Identifier' + sep_char + i_identifier + '\n')
fp.write('Investigation Title' + sep_char + i_title + '\n')
fp.write('Investigation Description' + sep_char + i_desc + '\n')
fp.write('Investigation Submission Date' + sep_char + '""\n')
fp.write('Investigation Public Release Date \t "" \n')
citation_str = '"' + re.sub('[\t\n]','',xml_root.find(".//citation").find(".//text").text) + '"' # remove all tabs and newlines
fp.write('Comment [MultiCellDS/cell_line/metadata/citation/text]' + sep_char + citation_str + '\n')
# TODO: check that "citation" exists first?
if (xml_root.find(".//citation").find(".//notes")):
fp.write('Comment [MultiCellDS/cell_line/metadata/citation/notes]' + sep_char + xml_root.find(".//citation").find(".//notes").text + '\n')
fp.write('INVESTIGATION PUBLICATIONS\n')
# Extract over all <PMID> in <data_origin> and <data_analysis>
#print('Investigation PubMed ID "21988888" "23084996" "22342935" ' )
# Extract <PMID> and <DOI> in all <data_origin> and <data_analysis>
# TODO? will we have matching # of each?
pmid = []
doi = []
url = []
uep = xml_root.find('.//data_origins') # uep = unique entry point
for elm in uep.findall('data_origin'):
# doi.append(elm.find('.//DOI').text)
doi_ptr = elm.find('.//DOI')
if (doi_ptr == None):
doi_value = ""
else:
doi_value = doi_ptr.text
doi.append(doi_value) # do we want to append "" if none??
# pmid.append(elm.find('.//PMID').text)
pmid_ptr = elm.find('.//PMID')
if (pmid_ptr == None):
pmid_value = ""
else:
pmid_value = pmid_ptr.text
pmid.append(pmid_value)
# pmid.append(pmid_value)
url_ptr = elm.find('.//URL')
if (url_ptr == None):
url_value = ""
else:
url_value = url_ptr.text
url.append(url_value)
#print("(post data_origin) pmid=",pmid)
#print("(post data_origin) url=",url)
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
# print(' "' + el.find('.//PMID').text + '"', end='')
# doi.append(elm.find('.//DOI').text)
# pmid.append(elm.find('.//PMID').text)
doi_ptr = elm.find('.//DOI')
if (doi_ptr == None):
doi_value = ""
else:
doi_value = doi_ptr.text
doi.append(doi_value) # do we want to append "" if none??
# pmid.append(elm.find('.//PMID').text)
pmid_ptr = elm.find('.//PMID')
if (pmid_ptr == None):
pmid_value = ""
else:
pmid_value = pmid_ptr.text
pmid.append(pmid_value)
# pmid.append(pmid_value)
#print("(post data_analysis) pmid=",pmid)
sep_char_sq = sep_char + '"' # tab + single quote
pmid_str = ''
for elm in pmid:
pmid_str += sep_char + '"' + elm + '"'
fp.write('Investigation PubMed ID' + sep_char + pmid_str + '\n')
doi_str = ''
for elm in doi:
doi_str += sep_char + '"' + elm + '"'
fp.write('Investigation Publication DOI' + sep_char + doi_str + '\n')
empty_str = ''.join(sep_char + '""' for x in pmid)
fp.write('Investigation Publication Author List' + sep_char + empty_str + '\n')
fp.write('Investigation Publication Title' + sep_char + empty_str + '\n')
pub_status_str = ''.join('\t"Published"' for x in pmid)
pub_title_str = ''.join('\t""' for x in pmid)
fp.write('Investigation Publication Status' + sep_char + pub_status_str + '\n')
pub_status_TA_str = ''.join('\t"C19026"' for x in pmid)
fp.write('Investigation Publication Status Term Accession' + sep_char + pub_status_TA_str + '\n')
pub_status_TSR_str = ''.join('\t"NCIT"' for x in pmid)
fp.write('Investigation Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('INVESTIGATION CONTACTS\n')
fp.write('Investigation Person Last Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//family-name").text + '"\t\n')
fp.write('Investigation Person First Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Investigation Person Mid Initials' + sep_char + '""\n')
fp.write('Investigation Person Email' + sep_char_sq + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Investigation Person Phone' + sep_char + '""\n')
fp.write('Investigation Person Fax' + sep_char + '""\n')
fp.write('Investigation Person Address' + sep_char + '""\n')
fp.write('Investigation Person Affiliation' + sep_char_sq + xml_root.find(".//current_contact").find(".//organization-name").text +
', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Investigation Person Roles' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Accession Number' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Source REF' + sep_char + '""\n')
fp.write('Comment[Investigation Person REF]' + sep_char + '""\n')
fp.write('STUDY\n')
fp.write('Study Identifier\t' + i_identifier + '\n')
fp.write('Study Title\t' + i_title + '\n')
fp.write('Study Description\t' + i_desc + '\n')
fp.write('Comment[Study Grant Number]\t""\n')
fp.write('Comment[Study Funding Agency]\t""\n')
fp.write('Study Submission Date\t""\n')
fp.write('Study Public Release Date\t""\n')
fp.write('Study File Name\t' + '"' + study_filename + '"\n')
fp.write('STUDY DESIGN DESCRIPTORS\n')
fp.write('Study Design Type\t""\n')
fp.write('Study Design Type Term Accession Number\t""\n')
fp.write('Study Design Type Term Source REF\t""\n')
# TODO? are these different than the previous pubs?
fp.write('STUDY PUBLICATIONS\n')
fp.write('Study PubMed ID' + sep_char + pmid_str + '\n')
fp.write('Study Publication DOI' + sep_char + doi_str + sep_char + '\n')
fp.write('Study Publication Author List' + sep_char + empty_str + '\n')
fp.write('Study Publication Title' + sep_char + pub_title_str + '\n')
fp.write('Study Publication Status' + sep_char + pub_status_str + sep_char + '\n')
fp.write('Study Publication Status Term Accession Number' + sep_char + pub_status_TA_str + sep_char + '\n')
fp.write('Study Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('STUDY FACTORS' + 3*sep_char + '\n')
fp.write('Study Factor Name\t"phenotype_dataset"\n')
fp.write('Study Factor Type\t""\n')
fp.write('Study Factor Type Term Accession Number\t""\n')
fp.write('Study Factor Type Term Source REF\t""\n')
#fp.write('Comment[phenotype_dataset_keywords] "viable; hypoxic; physioxia(standard); physioxia(breast); necrotic,chronic hypoxia"\n')
#fp.write('Comment[phenotype_dataset_keywords] "')
comment_str = 'Comment[phenotype_dataset_keywords]\t"'
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
comment_str += elm.attrib['keywords'] + '; '
# print(comment_str)
fp.write(comment_str[:-2] + '"\n')
fp.write('STUDY ASSAYS\t\n')
fp.write('Study Assay Measurement Type\t""\n')
fp.write('Study Assay Measurement Type Term Accession Number\t""\n')
fp.write('Study Assay Measurement Type Term Source REF\t""\n')
fp.write('Study Assay Technology Type\t"Digital Cell Line"\n')
fp.write('Study Assay Technology Type Term Accession Number\t""\n')
fp.write('Study Assay Technology Type Term Source REF\t""\n')
fp.write('Study Assay Technology Platform\t""\n')
fp.write('Study Assay File Name\t' + '"' + assay_filename + '"\n')
fp.write('STUDY PROTOCOLS\t\n')
fp.write('Study Protocol Name\t"microenvironment.measurement"\n')
fp.write('Study Protocol Type\t""\n')
fp.write('Study Protocol Type Term Accession Number\t""\n')
fp.write('Study Protocol Type Term Source REF\t""\n')
fp.write('Study Protocol Description\t""\n')
fp.write('Study Protocol URI\t""\n')
fp.write('Study Protocol Version\t""\n')
#fp.write('Study Protocol Parameters Name "oxygen.partial_pressure; DCIS_cell_density(2D).surface_density; DCIS_cell_area_fraction.area_fraction; DCIS_cell_volume_fraction.volume_fraction"\n')
comment_str = 'Study Protocol Parameters Name\t"'
# TODO? search for all phenotype_dataset/microenvironment/domain/variables/...
uep = xml_root.find('.//variables')
if (uep):
for elm in uep.findall('variable'):
if ('type' in elm.attrib.keys()): # TODO: what's desired format if 'type' is missing?
comment_str += elm.attrib['name'] + '.' + elm.attrib['type'] + '; '
else:
comment_str += elm.attrib['name'] + '; '
# comment_str += '; '
# print(comment_str)
fp.write(comment_str[:-2] + '"\n')
semicolon_sep_empty_str = ''.join('; ' for x in pmid)
fp.write('Study Protocol Parameters Name Term Accession Number\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Parameters Name Term Source REF\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Name\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Accession Number\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Source REF\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('STUDY CONTACTS\t\n')
fp.write('Study Person Last Name\t"' + xml_root.find(".//current_contact").find(".//family-name").text + '"\n')
fp.write('Study Person First Name\t"' + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Study Person Mid Initials\t""\n')
fp.write('Study Person Email\t"' + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Study Person Phone\t""\n')
fp.write('Study Person Fax\t""\n')
fp.write('Study Person Address\t""\n')
fp.write('Study Person Affiliation\t"' + xml_root.find(".//current_contact").find(".//organization-name").text +
', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Study Person Roles\t""\n')
fp.write('Study Person Roles Term Accession Number\t""\n')
fp.write('Study Person Roles Term Source REF\t""\n')
fp.write('Comment[creator_orcid-id_family-name]\t"' + xml_root.find(".//creator").find(".//family-name").text + '"\n')
fp.write('Comment[creator_orcid-id_given-names]\t"' + xml_root.find(".//creator").find(".//given-names").text + '"\n')
fp.write('Comment[creator_orcid-id_email]\t"' + xml_root.find(".//creator").find(".//email").text + '"\n')
fp.write('Comment[creator_orcid-id_organization-name]\t"' + xml_root.find(".//creator").find(".//organization-name").text +
', ' + xml_root.find(".//creator").find(".//department-name").text + '"\n')
#curator_ptr = xml_root.find(".//curator").find(".//family-name").text + '"\n')
family_name = ""
given_names = ""
email = ""
org = ""
dept = ""
curator_ptr = xml_root.find(".//curator")
# Note: use explicit "is not None" checks; ElementTree elements with no children are falsy.
if (curator_ptr is not None):
    family_name_ptr = curator_ptr.find(".//family-name")
    given_names_ptr = curator_ptr.find(".//given-names")
    email_ptr = curator_ptr.find(".//email")
    org_ptr = curator_ptr.find(".//organization-name")
    dept_ptr = curator_ptr.find(".//department-name")
    if (family_name_ptr is not None):
        family_name = family_name_ptr.text
    if (given_names_ptr is not None):
        given_names = given_names_ptr.text
    if (email_ptr is not None):
        email = email_ptr.text
    if (org_ptr is not None):
        org = org_ptr.text
    if (dept_ptr is not None):
        dept = dept_ptr.text
#fp.write('Comment[curator_orcid-id_family-name]\t"' + xml_root.find(".//curator").find(".//family-name").text + '"\n')
fp.write('Comment[curator_orcid-id_family-name]\t"' + family_name + '"\n')
#fp.write('Comment[curator_orcid-id_given-names]\t"' + xml_root.find(".//curator").find(".//given-names").text + '"\n')
fp.write('Comment[curator_orcid-id_given-names]\t"' + given_names + '"\n')
#fp.write('Comment[curator_orcid-id_email]\t"' + xml_root.find(".//curator").find(".//email").text + '"\n')
fp.write('Comment[curator_orcid-id_email]\t"' + email + '"\n')
fp.write('Comment[curator_orcid-id_organization-name]\t"' + org + ', ' + dept + '"\n')
fp.write('Comment[last_modified_by_orcid-id_family-name]\t"' + xml_root.find(".//last_modified_by").find(".//family-name").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_given-names]\t"' + xml_root.find(".//last_modified_by").find(".//given-names").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_email]\t"' + xml_root.find(".//last_modified_by").find(".//email").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_organization-name]\t"' + xml_root.find(".//last_modified_by").find(".//organization-name").text +
', ' + xml_root.find(".//last_modified_by").find(".//department-name").text + '"\n')
fp.write('Comment[Study Person REF]' + sep_char + '""' + '\n')
fp.close()
print(' --> ' + investigation_filename)
#=======================================================================
fp = open(study_filename, 'w')
# row #1 (column titles)
fp.write('Source Name' + sep_char)
source_name = i_identifier[1:-1] + '.0'
uep = xml_root.find('.//data_origins') # uep = unique entry point
for elm in uep.findall('data_origin'):
for elm2 in elm.findall('citation'):
fp.write('Comment[citation]' + sep_char)
# TODO: why did I insert the following line?
# pmid_origin = elm.find('.//PMID').text
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
for elm2 in elm.findall('citation'):
fp.write('Comment[citation]' + sep_char)
uep = xml_root.find('.//cell_origin')
cell_origin_characteristics = []
if (uep):
for elm in uep.getchildren():
fp.write('Characteristics[' + elm.tag + ']' + sep_char)
text_val = elm.text
text_val = ' '.join(text_val.split()) # strip out tabs and newlines
cell_origin_characteristics.append(text_val)
# print("cell_origin_characteristics----->",cell_origin_characteristics,"<-------")
fp.write('Factor Value[phenotype_dataset]' + sep_char + 'Sample Name\n')
# remaining rows
uep = xml_root.find('.//cell_line')
suffix = 0
for elm in uep.findall('phenotype_dataset'):
row_str = source_name + sep_char
# do we want a hierarchy of preferred citation types? (e.g., PMID,PMCID,DOI,URL)
if (len(pmid) > 0):
for p in pmid:
row_str += 'PMID: ' + p + sep_char
elif (len(url) > 0):
for p in url:
row_str += 'URL: ' + p + sep_char
# print("cell_origin_characteristics=",cell_origin_characteristics)
for c in cell_origin_characteristics:
row_str += c + sep_char
row_str += elm.attrib['keywords'] + sep_char + source_name + '.' + str(suffix)
suffix += 1
# print(row_str)
fp.write(row_str + '\n')
fp.close()
print(' --> ' + study_filename)
#=======================================================================
fp = open(assay_filename, 'w')
"""
Sample Name Protocol REF Parameter Value[oxygen.partial_pressure] Unit Parameter Value[DCIS_cell_density(2D).surface_density] Unit Parameter Value[DCIS_cell_area_fraction.area_fraction] Unit Parameter Value[DCIS_cell_volume_fraction.volume_fraction] Unit Data File
MCDS_L_0000000052.0.0 microenvironment.measurement 6.17 mmHg 0.00883 1/micron^2 0.8 dimensionless 0.8 dimensionless MCDS_L_0000000052.xml
MCDS_L_0000000052.0.1 microenvironment.measurement 8 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.2 microenvironment.measurement 38 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.3 microenvironment.measurement 52 mmHg MCDS_L_0000000052.xml
MCDS_L_0000000052.0.4 microenvironment.measurement 5 mmHg MCDS_L_0000000052.xml
"""
# We will do a two-pass approach:
# 1st pass: parse the first instance of the <variables> element to generate the header row.
# UPDATE: cannot assume the first instance of <variables> will be sufficient. The HUVEC data proves otherwise.
#
# Columns' titles
fp.write('Sample Name' + sep_char + 'Protocol REF' + sep_char )
uep = xml_root.find('.//variables') # TODO: also req: keywords="viable"?
# TODO: what to do if there are no <variable> entries?
if (uep):
num_vars = 0
for elm in uep.findall('variable'):
if ('type' in elm.attrib.keys()): # TODO: what's desired format if 'type' is missing?
pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
else:
pval_str = elm.attrib['name']
# pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
fp.write('Parameter Value[' + pval_str + '] ' + sep_char + 'Unit' + sep_char)
num_vars += 1
fp.write('Data File\n')
#print('num_vars=',num_vars)
# 2nd pass: for each <phenotype_dataset>, each <variables>, and each <variable>, extract a row of relevant
# info to match the column headings.
count = 0
# TODO: am I making too many assumptions about elements - existence, ordering, etc.?
id = xml_root.find(".//metadata").find(".//ID").text
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
vs = elm.find('.//variables')
# print("----- found <variables>, count=",count)
nvar = 0
# for ma in v.findall('material_amount'):
if vs:
comment_str = id + '.0.' + str(count) + '\t' + 'microenvironment.measurement'
# print(comment_str)
for v in vs.findall('variable'):
nvar += 1
# print(v.attrib['units'])
# print(v.find('.//material_amount').text)
# Need to strip out tabs here (sometimes)
text_val = v.find('.//material_amount').text
# print('------ text_val --->',text_val,'<---------')
text_val = ' '.join(text_val.split())
# print('------ text_val --->',text_val,'<---------')
if ('units' in v.attrib.keys()): # TODO: what's desired format if missing?
comment_str += sep_char + text_val + sep_char + v.attrib['units']
else:
comment_str += sep_char + text_val + sep_char + ""
# comment_str += sep_char + v.find('.//material_amount').text + sep_char + v.attrib['units']
# print(comment_str)
# print('nvar=',nvar)
fp.write(comment_str)
if (nvar == num_vars):
fp.write(sep_char)
else:
for idx in range(nvar,2*num_vars):
fp.write(sep_char)
# fp.write(comment_str + sep_char + xml_file + '\n')
# fp.write(xml_file + '\n')
# print("----- ",xml_base_filename, " + CR")
fp.write(xml_base_filename + '\n')
count += 1
else: # if no 'variables' present, just print minimal info
# comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_file + '\n'
comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_base_filename + '\n'
count += 1
fp.write(comment_str)
fp.close()
print(' --> ' + assay_filename)
| [
"[email protected]"
] | |
b6d70d3cd0bcef780e9d1bf21d1470f79ecdd2e7 | 2fc197681ac9cdd0346fe9ab56d9aa4d59b6f1d0 | /polyaxon/db/migrations/0001_initial.py | 27bab9b4b5e9c713404ae0d918e70a6b313ea7ff | [
"MIT"
] | permissive | dtaniwaki/polyaxon | 32e0fcfc4cd4b46d1d502ae26cd285dc9c11d55a | 04e3c9c9a732a2128233e8d1db1bdc1647fe7c55 | refs/heads/master | 2020-03-20T08:16:33.334881 | 2018-06-13T22:40:17 | 2018-06-13T22:40:17 | 137,303,634 | 0 | 0 | null | 2018-06-14T03:53:13 | 2018-06-14T03:53:13 | null | UTF-8 | Python | false | false | 52,402 | py | # Generated by Django 2.0.3 on 2018-06-12 13:31
import db.models.abstract_jobs
import db.models.repos
import db.models.utils
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import libs.blacklist
import libs.resource_validation
import libs.spec_validation
import re
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ActivityLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.CharField(max_length=128)),
('context', django.contrib.postgres.fields.jsonb.JSONField(help_text='Extra context information.')),
('created_at', models.DateTimeField()),
('object_id', models.PositiveIntegerField()),
('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'activity log',
'verbose_name_plural': 'activities logs',
},
),
migrations.CreateModel(
name='BuildJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the build job.', validators=[libs.spec_validation.validate_build_spec_config])),
('dockerfile', models.TextField(blank=True, help_text='The dockerfile used to create the image with this job.', null=True)),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='BuildJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.BuildJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Build Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ChartVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='CliVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Cluster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('version_api', django.contrib.postgres.fields.jsonb.JSONField(help_text='The cluster version api info')),
],
),
migrations.CreateModel(
name='ClusterEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('meta', django.contrib.postgres.fields.jsonb.JSONField()),
('level', models.CharField(max_length=16)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='db.Cluster')),
],
),
migrations.CreateModel(
name='ClusterNode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this node within the cluser.')),
('name', models.CharField(help_text='Name of the node', max_length=256)),
('hostname', models.CharField(blank=True, max_length=256, null=True)),
('role', models.CharField(choices=[('master', 'master'), ('agent', 'agent')], help_text='The role of the node', max_length=16)),
('docker_version', models.CharField(blank=True, max_length=128, null=True)),
('kubelet_version', models.CharField(max_length=64)),
('os_image', models.CharField(max_length=128)),
('kernel_version', models.CharField(max_length=128)),
('schedulable_taints', models.BooleanField(default=False)),
('schedulable_state', models.BooleanField(default=False)),
('memory', models.BigIntegerField()),
('cpu', models.FloatField()),
('n_gpus', models.PositiveSmallIntegerField()),
('status', models.CharField(choices=[('UNKNOWN', 'UNKNOWN'), ('Ready', 'Ready'), ('NotReady', 'NotReady'), ('Deleted', 'Deleted')], default='UNKNOWN', max_length=24)),
('is_current', models.BooleanField(default=True)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='db.Cluster')),
],
options={
'ordering': ['sequence'],
},
),
migrations.CreateModel(
name='CodeReference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commit', models.CharField(blank=True, max_length=40, null=True)),
],
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this experiment within the project.')),
('declarations', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The parameters used for this experiment.', null=True)),
                ('config', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The compiled polyaxonfile with specific values for this experiment.', null=True, validators=[libs.spec_validation.validate_experiment_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this group within the project.')),
('content', models.TextField(blank=True, help_text='The yaml content of the polyaxonfile/specification.', null=True, validators=[libs.spec_validation.validate_group_spec_content])),
('hptuning', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The experiment group hptuning params config.', null=True, validators=[libs.spec_validation.validate_group_hptuning_config])),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroupIteration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField(help_text='The experiment group iteration meta data.')),
('experiment_group', models.ForeignKey(help_text='The experiment group.', on_delete=django.db.models.deletion.CASCADE, related_name='iterations', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentGroupStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped')], default='Created', max_length=64, null=True)),
('experiment_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment group Statuses',
},
),
migrations.CreateModel(
name='ExperimentJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('role', models.CharField(default='master', max_length=64)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Experiment')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ExperimentMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('values', django.contrib.postgres.fields.jsonb.JSONField()),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metrics', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Resuming', 'Resuming'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Starting', 'Starting'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Statuses',
},
),
migrations.CreateModel(
name='ExternalRepo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('git_url', models.URLField()),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the run job.', validators=[libs.spec_validation.validate_job_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('original_job', models.ForeignKey(blank=True, help_text='The original job that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Job')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='JobResources',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('memory', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('gpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
],
options={
'verbose_name': 'job resources',
'verbose_name_plural': 'jobs resources',
},
),
migrations.CreateModel(
name='JobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Job')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Run Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='LibVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='NodeGPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('index', models.PositiveSmallIntegerField()),
('serial', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('memory', models.BigIntegerField()),
('cluster_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gpus', to='db.ClusterNode')),
],
options={
'ordering': ['index'],
},
),
migrations.CreateModel(
name='NotebookJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the notebook job.', validators=[libs.spec_validation.validate_notebook_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='NotebookJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.NotebookJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Notebook Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='Operation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. Default is None, which translates to now.', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
                ('trigger_policy', models.CharField(blank=True, choices=[('all_succeeded', 'all_succeeded'), ('all_failed', 'all_failed'), ('all_done', 'all_done'), ('one_succeeded', 'one_succeeded'), ('one_failed', 'one_failed'), ('one_done', 'one_done')], default='all_succeeded', help_text='defines the rule by which dependencies are applied, default is `all_succeeded`.', max_length=16, null=True)),
('max_retries', models.PositiveSmallIntegerField(blank=True, help_text='the number of retries that should be performed before failing the operation.', null=True)),
('retry_delay', models.PositiveIntegerField(blank=True, default=60, help_text='The delay between retries.', null=True)),
('retry_exponential_backoff', models.BooleanField(default=False, help_text='allow progressive longer waits between retries by using exponential backoff algorithm on retry delay.')),
('max_retry_delay', models.PositiveIntegerField(blank=True, default=3600, help_text='maximum delay interval between retries.', null=True)),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='When set, an operation will be able to limit the concurrent runs across execution_dates', null=True)),
('run_as_user', models.CharField(blank=True, help_text='unix username to impersonate while running the operation.', max_length=64, null=True)),
('config', models.TextField(blank=True, null=True)),
('celery_task', models.CharField(help_text='The celery task name to execute.', max_length=128)),
('celery_queue', models.CharField(blank=True, help_text='The celery queue name to use for the executing this task. If provided, it will override the queue provided in CELERY_TASK_ROUTES.', max_length=128, null=True)),
],
),
migrations.CreateModel(
name='OperationRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('celery_task_context', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The kwargs required to execute the celery task.', null=True)),
('celery_task_id', models.CharField(blank=True, max_length=36)),
('operation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Operation')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='OperationRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('operation_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.OperationRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Operation Run Statuses',
},
),
migrations.CreateModel(
name='Pipeline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. Default is None, which translates to now.', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='If set, it determines the number of operation instances allowed to run concurrently.', null=True)),
],
),
migrations.CreateModel(
name='PipelineRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('pipeline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Pipeline')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='PipelineRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('pipeline_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.PipelineRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Pipeline Run Statuses',
},
),
migrations.CreateModel(
name='PlatformVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('is_public', models.BooleanField(default=True, help_text='If project is public or private.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
('project', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='repo', to='db.Project')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('frequency', models.CharField(blank=True, help_text="Defines how often to run, this timedelta object gets added to your latest operation instance's execution_date to figure out the next schedule", max_length=64, null=True)),
                ('start_at', models.DateTimeField(blank=True, help_text='When this instance should run, default is None, which translates to now.', null=True)),
                ('end_at', models.DateTimeField(blank=True, help_text='When this instance should stop running, default is None, which translates to open-ended.', null=True)),
('depends_on_past', models.BooleanField(default=False, help_text="when set to true, the instances will run sequentially while relying on the previous instances' schedule to succeed.")),
],
),
migrations.CreateModel(
name='SSOIdentity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('provider', models.CharField(choices=[('github', 'github'), ('bitbucket', 'bitbucket'), ('gitlab', 'gitlab')], max_length=32)),
('external_id', models.CharField(max_length=64, null=True)),
('valid', models.BooleanField(default=False)),
('last_verified', models.DateTimeField(default=django.utils.timezone.now)),
('last_synced', models.DateTimeField(default=django.utils.timezone.now)),
('scopes', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, null=True, size=None)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='identities', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'SSO identity',
'verbose_name_plural': 'SSO identities',
},
),
migrations.CreateModel(
name='TensorboardJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the tensorboard job.', validators=[libs.spec_validation.validate_tensorboard_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('experiment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Experiment')),
('experiment_group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.ExperimentGroup')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Project')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='TensorboardJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.TensorboardJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Tensorboard Job Statuses',
'abstract': False,
},
),
migrations.AddField(
model_name='tensorboardjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.TensorboardJobStatus'),
),
migrations.AddField(
model_name='tensorboardjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='pipelinerun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.PipelineRunStatus'),
),
migrations.AddField(
model_name='pipeline',
name='project',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to='db.Project'),
),
migrations.AddField(
model_name='pipeline',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='pipeline',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='operationrun',
name='pipeline_run',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operation_runs', to='db.PipelineRun'),
),
migrations.AddField(
model_name='operationrun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.OperationRunStatus'),
),
migrations.AddField(
model_name='operationrun',
name='upstream_runs',
field=models.ManyToManyField(blank=True, related_name='downstream_runs', to='db.OperationRun'),
),
migrations.AddField(
model_name='operation',
name='pipeline',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operations', to='db.Pipeline'),
),
migrations.AddField(
model_name='operation',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='operation',
name='upstream_operations',
field=models.ManyToManyField(blank=True, related_name='downstream_operations', to='db.Operation'),
),
migrations.AddField(
model_name='notebookjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notebook_jobs', to='db.Project'),
),
migrations.AddField(
model_name='notebookjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.NotebookJobStatus'),
),
migrations.AddField(
model_name='notebookjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='job',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Project'),
),
migrations.AddField(
model_name='job',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobStatus'),
),
migrations.AddField(
model_name='job',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='externalrepo',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='external_repos', to='db.Project'),
),
migrations.AddField(
model_name='experimentjob',
name='resources',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobResources'),
),
migrations.AddField(
model_name='experimentjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentJobStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='project',
field=models.ForeignKey(help_text='The project this polyaxonfile belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to='db.Project'),
),
migrations.AddField(
model_name='experimentgroup',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentGroupStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='experiment',
name='experiment_group',
field=models.ForeignKey(blank=True, help_text='The experiment group that generate this experiment.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.ExperimentGroup'),
),
migrations.AddField(
model_name='experiment',
name='metric',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentMetric'),
),
migrations.AddField(
model_name='experiment',
name='original_experiment',
field=models.ForeignKey(blank=True, help_text='The original experiment that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Experiment'),
),
migrations.AddField(
model_name='experiment',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.Project'),
),
migrations.AddField(
model_name='experiment',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentStatus'),
),
migrations.AddField(
model_name='experiment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='codereference',
name='external_repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.ExternalRepo'),
),
migrations.AddField(
model_name='codereference',
name='repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.Repo'),
),
migrations.AddField(
model_name='buildjob',
name='code_reference',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference'),
),
migrations.AddField(
model_name='buildjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='build_jobs', to='db.Project'),
),
migrations.AddField(
model_name='buildjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJobStatus'),
),
migrations.AddField(
model_name='buildjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='ssoidentity',
unique_together={('provider', 'user'), ('provider', 'external_id')},
),
migrations.AlterUniqueTogether(
name='project',
unique_together={('user', 'name')},
),
migrations.AlterUniqueTogether(
name='nodegpu',
unique_together={('cluster_node', 'index')},
),
migrations.AlterUniqueTogether(
name='externalrepo',
unique_together={('project', 'git_url')},
),
migrations.AlterUniqueTogether(
name='experimentjob',
unique_together={('experiment', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experimentgroup',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experiment',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='clusternode',
unique_together={('cluster', 'sequence')},
),
]
| [
"[email protected]"
] | |
6f0cf4d61aa094e7e4958d5d2d42c7ee379e097f | 942a82cd1e34cd8f57e1d7f3272e4086605256ee | /config/settings.py | 4ab609f97c0680e52cc1f2490a6f0d441b5e6b02 | [] | no_license | hanieh-mav/SocialNetwork-with-drf | d451126f93e3735a8c9d6dbf714a8179785e15cc | d929704a3d9f26e1e0ca5d961a01ba7dd5c6bf84 | refs/heads/main | 2023-06-13T08:17:46.591597 | 2021-07-09T13:37:06 | 2021-07-09T13:37:06 | 353,754,846 | 2 | 0 | null | 2021-07-09T13:27:27 | 2021-04-01T16:04:26 | Python | UTF-8 | Python | false | false | 4,482 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@81g)s8gw+7-84o%ks%*8&j$cbb+&m%(#)+e6getb5o40@vil)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'crispy_forms',
'posts.apps.PostsConfig',
'accounts.apps.AccountsConfig',
'postapi.apps.PostapiConfig',
'accountapi.apps.AccountapiConfig',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dj_rest_auth.registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
#LOGIN_URL
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'posts:post-list'
#LOGOUT_URL
LOGOUT_REDIRECT_URL = 'posts:post-list'
STATIC_URL = '/static/'
#MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
AUTH_USER_MODEL = 'accounts.User'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'dj_rest_auth.jwt_auth.JWTCookieAuthentication',
],
}
SITE_ID = 1
REST_USE_JWT = True
JWT_AUTH_COOKIE = 'my-app-auth'
JWT_AUTH_REFRESH_COOKIE = 'my-refresh-token'
#EMAIL SETTING
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'xxxxxxxxxxxxxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True | [
"[email protected]"
] | |
13420aecf149f66ef9cb63a68a5a090dbc8a2e3c | 6c3e475dcd95d14875a199466b8a7c744f61478b | /userProfile/userProfile.py | 1395f4986a45fed5e4b88da12ed0cb114aa8c04b | [] | no_license | webclinic017/tripleATradeBot | b4cce7a330e76f9f207c4d6f4d16327b1717a17a | 40b6130f52eb969336c7b602e698f41a2d8f947b | refs/heads/main | 2023-01-04T04:16:38.338563 | 2020-10-29T10:33:34 | 2020-10-29T10:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from eventLogger import eventLogger as logger
from pathList import pathList
from alice_blue import *
import openpyxl
class userProfile:
    userProfileWorkBook = ""
    profileName = ""
userName = ""
password = ""
apiSecret = ""
accessToken = ""
aliceObj = ""
exchangeList = ['NSE']
def __init__(self, profileName):
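        # Load this profile's credentials (username, password, API secret) from the user profile workbook.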
self.userProfileWorkBook = openpyxl.load_workbook(pathList.userProfileFileName)
self.profileName = profileName
        sheet = self.userProfileWorkBook[self.profileName]
        self.userName = sheet['A1'].value
        self.password = sheet['A2'].value
        self.apiSecret = sheet['A3'].value
logger.info(self.userName)
logger.info(self.password)
logger.info(self.apiSecret)
def login(self):
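        # Fetch an access token from Alice Blue and build an API client for the configured exchanges.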
logger.info("login")
self.accessToken = AliceBlue.login_and_get_access_token(username=self.userName, password=self.password, twoFA='a', api_secret=self.apiSecret)
self.aliceObj = AliceBlue(username=self.userName, password=self.password, access_token=self.accessToken, master_contracts_to_download=self.exchangeList)
def profileData(self):
logger.info("profileData")
print (self.aliceObj.get_profile())
print (self.aliceObj.get_balance())
| [
"[email protected]"
] | |
62abf1b5cf573596ca943d290748c41b37bd2e49 | 4dfb1731e42654d2694b9ea109b0da26f0e6215c | /qbittorrent_mod.py | d6a8fc5bab59ef956ce8f458554ba67a2d766cb4 | [
"MIT"
] | permissive | y2038558528/flexget_qbittorrent_mod | 3e89e13c8814e21de51e101f3430ce660b4cfcb5 | a49dacf0b4bf20217cb43df0ad94112b7dc67364 | refs/heads/master | 2023-03-22T22:00:04.330858 | 2021-03-15T13:45:02 | 2021-03-15T13:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,029 | py | import math
import os
import re
from datetime import datetime
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from loguru import logger
from .ptsites.client.qbittorrent_client import QBittorrentClientFactory
class QBittorrentModBase:
def __init__(self):
self.client = None
def prepare_config(self, config):
if isinstance(config, bool):
config = {'enabled': config}
config.setdefault('enabled', True)
config.setdefault('host', 'localhost')
config.setdefault('port', 8080)
config.setdefault('use_ssl', False)
config.setdefault('verify_cert', True)
return config
def create_client(self, config):
client = QBittorrentClientFactory().get_client(config)
return client
def on_task_start(self, task, config):
self.client = None
config = self.prepare_config(config)
if config['enabled']:
if task.options.test:
logger.info('Trying to connect to qBittorrent...')
self.client = self.create_client(config)
if self.client:
logger.info('Successfully connected to qBittorrent.')
else:
logger.error('It looks like there was a problem connecting to qBittorrent.')
class PluginQBittorrentModInput(QBittorrentModBase):
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'use_ssl': {'type': 'boolean'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'verify_cert': {'type': 'boolean'},
'server_state': {'oneOf': [{'type': 'boolean'}, {'type': 'string'}]},
'force_update': {'type': 'boolean'},
'enabled': {'type': 'boolean'},
},
'additionalProperties': False
}
def prepare_config(self, config):
config = QBittorrentModBase.prepare_config(self, config)
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
if not config['enabled']:
return
server_state = config.get('server_state')
if server_state:
entry = Entry(
title='qBittorrent Server State' if isinstance(server_state, bool) else server_state,
url=config.get('host')
)
entry['time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
entry['server_state'] = {}
try:
self.client = self.create_client(config)
entry['server_state'] = self.client.get_main_data_snapshot(id(task)).get('server_state')
entry['server_state']['flexget_connected'] = True
except plugin.PluginError:
entry['server_state']['flexget_connected'] = False
return [entry]
else:
self.client = self.create_client(config)
force_update = config.get('force_update', False)
return list(
self.client.get_main_data_snapshot(id(task), force_update=force_update).get('entry_dict').values())
class PluginQBittorrentMod(QBittorrentModBase):
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'use_ssl': {'type': 'boolean'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'verify_cert': {'type': 'boolean'},
'action': {
'type': 'object',
'properties': {
'add': {
'type': 'object',
'properties': {
'savepath': {'type': 'string'},
'cookie': {'type': 'string'},
'category': {'type': 'string'},
'skip_checking': {'type': 'boolean'},
'paused': {'type': 'string'},
'root_folder': {'type': 'string'},
'rename': {'type': 'string'},
'upLimit': {'type': 'integer'},
'dlLimit': {'type': 'integer'},
'autoTMM': {'type': 'boolean'},
'sequentialDownload': {'type': 'string'},
'firstLastPiecePrio': {'type': 'string'},
'reject_on': {
'type': 'object',
'properties': {
'bandwidth_limit': {'type': 'integer'},
'dl_speed': {
'oneOf': [
{'type': 'boolean'},
{'type': 'integer'},
{'type': 'number', 'minimum': 0.1, 'maximum': 0.9},
]
},
'dl_limit': {'oneOf': [{'type': 'boolean'}, {'type': 'integer'}]}
}
}
}
},
'remove': {
'type': 'object',
'properties': {
'keeper': {
'type': 'object',
'properties': {
'keep_disk_space': {'type': 'integer'},
'check_reseed': {
'oneOf': [{'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string'}}]},
'delete_files': {'type': 'boolean'},
'dl_limit_on_succeeded': {'type': 'integer'},
'alt_dl_limit_on_succeeded': {'type': 'integer'},
'dl_limit_interval': {'type': 'integer'}
},
},
'cleaner': {
'type': 'object',
'properties': {
'delete_files': {'type': 'boolean'}
}
}
},
"minProperties": 1,
"maxProperties": 1,
},
'resume': {
'type': 'object',
'properties': {
'recheck_torrents': {'type': 'boolean'}
}
},
'pause': {
'type': 'boolean'
},
'modify': {
'type': 'object',
'properties': {
'tag_by_tracker': {'type': 'boolean'},
'replace_trackers': {
'type': 'object',
'properties': {
}
}
}
},
'manage_conn': {
'type': 'object',
'properties': {
'min': {'type': 'integer'},
'max': {'type': 'integer'}
}
},
'limit_upload_by_tracker': {
'type': 'object',
'properties': {
'working': {'type': 'integer'},
'not_working': {'type': 'integer'}
}
}
},
"minProperties": 1,
"maxProperties": 1,
},
'fail_html': {'type': 'boolean'},
},
'additionalProperties': False
}
def prepare_config(self, config):
config = super().prepare_config(config)
config.setdefault('fail_html', True)
return config
@plugin.priority(120)
def on_task_download(self, task, config):
config = self.prepare_config(config)
add_options = config.get('action').get('add')
if not add_options or not task.accepted:
return
if not self.client:
self.client = self.create_client(config)
if self.client:
logger.debug('Successfully connected to qBittorrent.')
else:
raise plugin.PluginError("Couldn't connect to qBittorrent.")
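        # Look at the client's global transfer state and build a reject reason when the download line
        # is already saturated or the download limit has been pushed below the configured threshold.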
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
server_state = main_data_snapshot.get('server_state')
        reject_on = add_options.get('reject_on') or {}  # 'reject_on' is optional in the schema
bandwidth_limit = reject_on.get('bandwidth_limit')
reject_on_dl_speed = reject_on.get('dl_speed')
reject_on_dl_limit = reject_on.get('dl_limit')
reject_reason = ''
dl_rate_limit = server_state.get('dl_rate_limit')
if reject_on_dl_limit:
if dl_rate_limit and dl_rate_limit < reject_on_dl_limit:
reject_reason = 'dl_limit: {:.2F} MiB < reject_on_dl_limit: {:.2F} MiB'.format(
dl_rate_limit / (1024 * 1024), reject_on_dl_limit / (1024 * 1024))
if reject_on_dl_speed:
if isinstance(reject_on_dl_speed, float):
dl_rate_limit = dl_rate_limit if dl_rate_limit else bandwidth_limit
reject_on_dl_speed = int(dl_rate_limit * reject_on_dl_speed)
dl_info_speed = server_state.get('dl_info_speed')
if dl_info_speed and dl_info_speed > reject_on_dl_speed:
reject_reason = 'dl_speed: {:.2F} MiB > reject_on_dl_speed: {:.2F} MiB'.format(
dl_info_speed / (1024 * 1024), reject_on_dl_speed / (1024 * 1024))
for entry in task.accepted:
if reject_reason:
entry.reject(reason=reject_reason, remember=True)
site_name = self._get_site_name(entry.get('url'))
logger.info('reject {}, because: {}, site: {}', entry['title'], reject_reason, site_name)
continue
if 'download' not in task.config:
download = plugin.get('download', self)
download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
@plugin.priority(135)
def on_task_output(self, task, config):
config = self.prepare_config(config)
action_config = config.get('action')
if len(action_config) != 1:
raise plugin.PluginError('There must be and only one action')
# don't add when learning
if task.options.learn:
return
if not task.accepted and not action_config.get('remove'):
return
if not self.client:
self.client = self.create_client(config)
if self.client:
logger.debug('Successfully connected to qBittorrent.')
else:
raise plugin.PluginError("Couldn't connect to qBittorrent.")
(action_name, option), = action_config.items()
action = getattr(self, action_name + '_entries', None)
if action:
action(task, option)
else:
raise plugin.PluginError('Unknown action.')
def add_entries(self, task, add_options):
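        # Merge per-entry fields with the plugin's add options and hand each accepted torrent to qBittorrent.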
options = {}
for entry in task.accepted:
for attr_str in ['savepath',
'cookie',
'category',
'skip_checking',
'paused',
'root_folder',
'rename',
'upLimit',
'dlLimit',
'autoTMM',
'sequentialDownload',
'firstLastPiecePrio']:
attr = entry.get(attr_str, add_options.get(attr_str))
if attr:
options[attr_str] = attr
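            # With automatic torrent management the category decides the save path, so drop the explicit one.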
if options.get('autoTMM') and options.get('category') and options.get('savepath'):
del options['savepath']
is_magnet = entry['url'].startswith('magnet:')
if not is_magnet:
if 'file' not in entry:
entry.fail('File missing?')
return
if not os.path.exists(entry['file']):
tmp_path = os.path.join(task.manager.config_base, 'temp')
logger.debug('entry: {}', entry)
logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
return
self.client.add_torrent_file(entry['file'], options)
else:
self.client.add_torrent_url(entry['url'], options)
def remove_entries(self, task, remove_options):
(mode_name, option), = remove_options.items()
mode = getattr(self, 'remove_entries_' + mode_name, None)
if mode:
mode(task, option)
else:
raise plugin.PluginError('Unknown mode.')
def remove_entries_keeper(self, task, keeper_options):
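        # Keeper mode: keep at least `keep_disk_space` GiB free. Accepted torrents are deleted one reseed
        # group (torrents sharing a save path) at a time until enough space is freed; if that is still not
        # enough, the client's download rate is capped so the disk cannot fill before the next run.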
delete_files = keeper_options.get('delete_files')
check_reseed = keeper_options.get('check_reseed')
keep_disk_space = keeper_options.get('keep_disk_space')
dl_limit_interval = keeper_options.get('dl_limit_interval', 24 * 60 * 60)
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
server_state = main_data_snapshot.get('server_state')
dl_rate_limit = server_state.get('dl_rate_limit')
use_alt_speed_limits = server_state.get('use_alt_speed_limits')
free_space_on_disk = server_state.get('free_space_on_disk')
dl_limit_mode = 'dl_limit'
dl_limit_on_succeeded = keeper_options.get('dl_limit_on_succeeded', 0)
alt_dl_limit_on_succeeded = keeper_options.get('alt_dl_limit_on_succeeded', 0)
if use_alt_speed_limits:
dl_limit_mode = 'alt_dl_limit'
dl_limit_on_succeeded = alt_dl_limit_on_succeeded
keep_disk_space = keep_disk_space * 1024 * 1024 * 1024
if keep_disk_space < free_space_on_disk:
if dl_limit_on_succeeded is not None:
dl_limit = math.floor(dl_limit_on_succeeded / 1024) * 1024
if dl_limit != dl_rate_limit:
self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
logger.info("set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
return
accepted_entry_hashes = []
delete_hashes = []
delete_size = 0
if not task.accepted:
self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
return
entry_dict = main_data_snapshot.get('entry_dict')
reseed_dict = main_data_snapshot.get('reseed_dict')
for entry in task.accepted:
accepted_entry_hashes.append(entry['torrent_info_hash'])
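        # Expand every accepted torrent into its reseed group so torrents sharing a save path are handled together.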
for entry_hash in accepted_entry_hashes:
if entry_hash in delete_hashes:
continue
server_entry = entry_dict.get(entry_hash)
            if not server_entry:
                # hash missing from the snapshot: reset the sync rid and skip this torrent
                self.client.reset_rid()
                continue
save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
reseed_entry_list = reseed_dict.get(save_path_with_name)
check_hashes = []
torrent_hashes = []
torrent_size = 0
for reseed_entry in reseed_entry_list:
if reseed_entry['qbittorrent_completed'] != 0:
torrent_size = reseed_entry['qbittorrent_completed']
if isinstance(check_reseed, list):
trackers = reseed_entry['qbittorrent_trackers']
site_names = []
for tracker in trackers:
site_names.append(self._get_site_name(tracker.get('url')))
if len(set(check_reseed) & set(site_names)) > 0:
check_hashes.append(reseed_entry['torrent_info_hash'])
else:
check_hashes.append(reseed_entry['torrent_info_hash'])
torrent_hashes.append(reseed_entry['torrent_info_hash'])
if check_reseed and not set(accepted_entry_hashes) >= set(check_hashes):
for torrent_hash in torrent_hashes:
entry_dict.get(torrent_hash).reject(
                        reason='torrents with the same save path have not all been tested')
continue
else:
if keep_disk_space > free_space_on_disk + delete_size:
delete_size += torrent_size
self._build_delete_hashes(delete_hashes, torrent_hashes, entry_dict, keep_disk_space,
free_space_on_disk, delete_size)
if keep_disk_space < free_space_on_disk + delete_size:
break
self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
if len(delete_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_hashes), delete_files)
def calc_and_set_dl_limit(self, keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode):
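        # Still short on space after the planned deletions: cap the download rate so that no more data can
        # arrive over `dl_limit_interval` seconds than the space that will actually be available.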
if keep_disk_space > free_space_on_disk + delete_size:
dl_limit = (free_space_on_disk + delete_size) / dl_limit_interval
if dl_limit_on_succeeded and dl_limit > dl_limit_on_succeeded:
dl_limit = dl_limit_on_succeeded
dl_limit = math.floor(dl_limit / 1024) * 1024
if dl_limit != dl_rate_limit:
self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
logger.warning("not enough disk space, set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
def _build_delete_hashes(self, delete_hashes, torrent_hashes, all_entry_map, keep_disk_space, free_space_on_disk,
delete_size):
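        # Mark the whole reseed group for deletion, accept its entries and log what is about to be removed.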
delete_hashes.extend(torrent_hashes)
logger.info('keep_disk_space: {:.2F} GiB, free_space_on_disk: {:.2f} GiB, delete_size: {:.2f} GiB',
keep_disk_space / (1024 * 1024 * 1024), free_space_on_disk / (1024 * 1024 * 1024),
delete_size / (1024 * 1024 * 1024))
entries = []
for torrent_hash in torrent_hashes:
entry = all_entry_map.get(torrent_hash)
            entry.accept(reason='torrents with the same save path have all passed testing')
entries.append(entry)
entries.sort(key=lambda e: e['qbittorrent_last_activity'], reverse=True)
for entry in entries:
logger.info(
'{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, site: {}',
entry['title'],
entry['qbittorrent_completed'] / (1024 * 1024 * 1024),
entry['qbittorrent_seeding_time'] / (60 * 60),
entry['qbittorrent_share_ratio'],
entry['qbittorrent_last_activity'],
entry['qbittorrent_tags'])
def remove_entries_cleaner(self, task, cleaner_options):
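        # Cleaner mode: remove accepted torrents from the client. Files on disk are only removed when every
        # torrent sharing the same save path was accepted (and delete_files is set); otherwise the torrents
        # are dropped but their data is kept for the remaining seeds.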
delete_files = cleaner_options.get('delete_files')
delete_hashes = []
delete_files_hashes = []
accepted_entry_hashes = []
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
entry_dict = main_data_snapshot.get('entry_dict')
reseed_dict = main_data_snapshot.get('reseed_dict')
for entry in task.accepted:
accepted_entry_hashes.append(entry['torrent_info_hash'])
for entry_hash in accepted_entry_hashes:
if entry_hash in delete_hashes or entry_hash in delete_files_hashes:
continue
server_entry = entry_dict.get(entry_hash)
if not server_entry:
self.client.reset_rid()
continue
save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
reseed_entry_list = reseed_dict.get(save_path_with_name)
torrent_hashes = []
for reseed_entry in reseed_entry_list:
torrent_hashes.append(reseed_entry['torrent_info_hash'])
if not set(accepted_entry_hashes) >= set(torrent_hashes):
delete_hashes.extend(set(accepted_entry_hashes) & set(torrent_hashes))
else:
delete_files_hashes.extend(torrent_hashes)
if len(delete_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_hashes), False)
self.print_clean_log(entry_dict, delete_hashes, False)
if len(delete_files_hashes) > 0:
self.client.delete_torrents(str.join('|', delete_files_hashes), delete_files)
self.print_clean_log(entry_dict, delete_files_hashes, delete_files)
def print_clean_log(self, entry_dict, hashes, delete_files):
for torrent_hash in hashes:
entry = entry_dict.get(torrent_hash)
logger.info(
'{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, tracker_msg: {}, site: {}, delete_files: {}',
entry['title'],
entry['qbittorrent_completed'] / (1024 * 1024 * 1024),
entry['qbittorrent_seeding_time'] / (60 * 60),
entry['qbittorrent_share_ratio'],
entry['qbittorrent_last_activity'],
entry['qbittorrent_tracker_msg'],
entry['qbittorrent_tags'],
delete_files
)
def resume_entries(self, task, resume_options):
recheck_torrents = resume_options.get('recheck_torrents')
main_data_snapshot = self.client.get_main_data_snapshot(id(task))
reseed_dict = main_data_snapshot.get('reseed_dict')
hashes = []
recheck_hashes = []
for entry in task.accepted:
save_path_with_name = entry['qbittorrent_save_path_with_name']
reseed_entry_list = reseed_dict.get(save_path_with_name)
resume = False
for reseed_entry in reseed_entry_list:
seeding = 'up' in reseed_entry['qbittorrent_state'].lower() and reseed_entry[
'qbittorrent_state'] != 'pausedUP'
if seeding:
hashes.append(entry['torrent_info_hash'])
logger.info('{}', entry['title'])
resume = True
break
if not resume and entry['qbittorrent_state'] != 'checkingUP':
                entry.reject(reason='cannot find a seeding torrent with the same save path')
recheck_hashes.append(entry['torrent_info_hash'])
if recheck_torrents and len(recheck_hashes) > 0:
logger.info('recheck {}', recheck_hashes)
self.client.recheck_torrents(str.join('|', recheck_hashes))
self.client.resume_torrents(str.join('|', hashes))
def pause_entries(self, task, pause_options):
if not pause_options:
return
hashes = []
for entry in task.accepted:
hashes.append(entry['torrent_info_hash'])
logger.info('pause: {}', entry['title'])
self.client.pause_torrents(str.join('|', hashes))
def modify_entries(self, task, modify_options):
tag_by_tracker = modify_options.get('tag_by_tracker')
replace_trackers = modify_options.get('replace_trackers')
for entry in task.accepted:
tags = entry.get('qbittorrent_tags')
torrent_trackers = entry.get('qbittorrent_trackers')
for tracker in torrent_trackers:
if tag_by_tracker:
site_name = self._get_site_name(tracker.get('url'))
if site_name and site_name not in tags:
self.client.add_torrent_tags(entry['torrent_info_hash'], site_name)
tags += ', {}'.format(site_name)
logger.info('{} add tag {}', entry.get('title'), site_name)
if replace_trackers:
for orig_url, new_url in replace_trackers.items():
if tracker.get('url') == orig_url:
if new_url:
logger.info('{} update tracker {}', entry.get('title'), new_url)
self.client.edit_trackers(entry.get('torrent_info_hash'), orig_url, new_url)
else:
logger.info('{} remove tracker {}', entry.get('title'), orig_url)
self.client.remove_trackers(entry.get('torrent_info_hash'), orig_url)
def manage_conn_entries(self, task, manage_conn_options):
min_conn = manage_conn_options.get('min')
max_conn = manage_conn_options.get('max')
for entry in task.accepted:
step = entry.get('step')
if not step:
return
server_state = entry.get('server_state')
server_queued_io_jobs = server_state.get('queued_io_jobs')
server_total_peer_connections = server_state.get('total_peer_connections')
application_preferences = self.client.get_application_preferences()
max_connect = application_preferences.get('max_connec')
if max_connect == -1:
max_connect = float('inf')
if (step > 0 and max_connect <= server_total_peer_connections) or step < 0:
max_connect_changed = server_total_peer_connections + step
if max_connect_changed < min_conn:
max_connect_changed = min_conn
elif max_connect_changed > max_conn:
max_connect_changed = max_conn
self.client.set_application_preferences('{{"max_connec": {}}}'.format(max_connect_changed))
logger.info('queued_io_jobs: {} , total_peer_connections: {}, set max_connec to {}',
server_queued_io_jobs, server_total_peer_connections, max_connect_changed)
def limit_upload_by_tracker_entries(self, task, limit_when_not_working_options):
working_speed = limit_when_not_working_options.get('working')
not_working_speed = limit_when_not_working_options.get('not_working')
working_hashes = []
not_working_hashes = []
for entry in task.accepted:
torrent_trackers = entry.get('qbittorrent_trackers')
is_working = False
updating = False
for tracker in torrent_trackers:
status = tracker.get('status')
if status == 2:
is_working = True
elif status == 3:
updating = True
if updating:
continue
up_limit = 0 if entry['qbittorrent_up_limit'] == -1 else entry['qbittorrent_up_limit']
if is_working:
entry_working = entry.get('working') if entry.get('working') else working_speed
if up_limit != entry_working:
if entry.get('working'):
self.client.set_torrent_upload_limit(entry['torrent_info_hash'], entry_working)
else:
working_hashes.append(entry['torrent_info_hash'])
logger.debug(
f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is working, set torrent upload limit to {entry_working} B/s')
else:
if up_limit != not_working_speed:
not_working_hashes.append(entry['torrent_info_hash'])
logger.debug(
f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is not working, set torrent upload limit to {not_working_speed} B/s')
if working_hashes:
self.client.set_torrent_upload_limit(str.join('|', working_hashes), working_speed)
if not_working_hashes:
self.client.set_torrent_upload_limit(str.join('|', not_working_hashes), not_working_speed)
def _get_site_name(self, tracker_url):
re_object = re.search('(?<=//).*?(?=/)', tracker_url)
if re_object:
domain = re_object.group().split('.')
if len(domain) > 1:
site_name = domain[len(domain) - 2]
if site_name == 'edu':
site_name = domain[len(domain) - 3]
return site_name
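    # Example of the parsing above (hypothetical URL): for
    # 'https://tracker.example.org/announce' the regex captures
    # 'tracker.example.org' and the second-level label 'example' is returned
    # as the site name.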
def on_task_learn(self, task, config):
""" Make sure all temp files are cleaned up when entries are learned """
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get('download', self)
download.cleanup_temp_files(task)
on_task_abort = on_task_learn
@event('plugin.register')
def register_plugin():
plugin.register(PluginQBittorrentMod, 'qbittorrent_mod', api_ver=2)
plugin.register(PluginQBittorrentModInput, 'from_qbittorrent_mod', api_ver=2)
| [
"[email protected]"
] | |
8723a4a6f9bb16968b5f83ec44895b30cb9da123 | d82b879f41e906589a0a6ad5a6a09e0a0032aa3f | /ObservationScripts/on_off/observe_moon_spec_analyser.py | 176f9c75c90dd4f6945052404f93c17615964d9f | [] | no_license | SETIatHCRO/ATA-Utils | 66718eed669882792148fe0b7a2f977cd0f6ac2e | 59f4d21b086effaf41d5e11e338ce602c803cfd0 | refs/heads/master | 2023-08-16T20:41:44.233507 | 2023-08-10T20:39:13 | 2023-08-10T20:39:13 | 137,617,987 | 5 | 5 | null | 2023-08-10T20:39:14 | 2018-06-17T00:07:05 | Jupyter Notebook | UTF-8 | Python | false | false | 867 | py | #!/home/obsuser/miniconda3/envs/ATAobs/bin/python
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import time
import atexit
import numpy as np
import sys
import argparse
import logging
import os
def main():
logger = logger_defaults.getProgramLogger("observe",
loglevel=logging.INFO)
az_offset = 20.
el_offset = 0.
ant_list = ["2b"]
source = "moon"
ata_control.reserve_antennas(ant_list)
atexit.register(ata_control.release_antennas,ant_list, False)
ata_control.create_ephems2(source, az_offset, el_offset)
ata_control.point_ants2(source, "off", ant_list)
#ata_control.autotune(ant_list)
_ = input("Press any key to switch to on source")
ata_control.point_ants2(source, "on", ant_list)
print("on source acquired")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b38a5417f4cf586733ab782b41c420ea59c10d53 | 6a084a2df2869ce3ad565610cbf92eccf00a233e | /states/postgres_user.py | c65e8d42284fe1a2a9ce2c6f70f436a95ff64235 | [] | no_license | ltxin/saltstack | 95b5356715cc918afec378e2926d9f9a1c7a85d5 | 30a493ef5e46bd7629c8ba400e559dab023c1431 | refs/heads/master | 2021-01-16T17:52:56.939714 | 2017-08-11T10:13:41 | 2017-08-11T10:13:41 | 100,019,324 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,166 | py | # -*- coding: utf-8 -*-
'''
Management of PostgreSQL users (roles)
======================================
The postgres_user state module is used to create and manage Postgres users.
.. code-block:: yaml
frank:
postgres_user.present
'''
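# A slightly fuller usage sketch (hypothetical values; every option shown maps
# to a parameter of the ``present`` state below):
#
#     frank:
#       postgres_user.present:
#         - login: True
#         - createdb: True
#         - password: changeme
#         - db_host: localhost
#         - db_port: 5432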
from __future__ import absolute_import
# Import Python libs
# Import salt libs
import logging
# Salt imports
from salt.modules import postgres
import salt.ext.six as six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if the postgres module is present
'''
if 'postgres.user_exists' not in __salt__:
return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.')
return True
def present(name,
createdb=None,
createroles=None,
createuser=None,
encrypted=None,
superuser=None,
replication=None,
inherit=None,
login=None,
password=None,
default_password=None,
refresh_password=None,
groups=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named user is present with the specified privileges
    Please note that the user/group notion in postgresql is just an
    abstraction: everything is a role, where users can be seen as roles with
    the LOGIN privilege and groups as roles without it.
name
The name of the system user to manage.
createdb
Is the user allowed to create databases?
createroles
Is the user allowed to create other users?
createuser
Alias to create roles
encrypted
Should the password be encrypted in the system catalog?
login
Should the group have login perm
inherit
Should the group inherit permissions
superuser
Should the new user be a "superuser"
replication
Should the new user be allowed to initiate streaming replication
    password
        The system user's password. It can be either a plain string or an
        md5-hashed postgresql password of the form::
            'md5' + md5(password + role)
        If encrypted is None or True, the password will be automatically
        encrypted to the previous format if it is not already done.
    default_password
The password used only when creating the user, unless password is set.
.. versionadded:: 2016.3.0
refresh_password
Password refresh flag
        Boolean attribute to specify whether the password comparison check
        should be performed.
If refresh_password is ``True``, the password will be automatically
updated without extra password change check.
This behaviour makes it possible to execute in environments without
superuser access available, e.g. Amazon RDS for PostgreSQL
groups
A string of comma separated groups the user should be in
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
        Postgres database username, if different from config or default.
db_password
Postgres user's password, if any password, for a specified db_user.
db_host
Postgres database host, if different from config or default.
db_port
Postgres database port, if different from config or default.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is already present'.format(name)}
if createuser:
createroles = True
# default to encrypted passwords
if encrypted is not False:
encrypted = postgres._DEFAULT_PASSWORDS_ENCRYPTION
    # maybe encrypt it if not already done and necessary
password = postgres._maybe_encrypt_password(name,
password,
encrypted=encrypted)
if default_password is not None:
default_password = postgres._maybe_encrypt_password(name,
default_password,
encrypted=encrypted)
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if user exists
mode = 'create'
user_attr = __salt__['postgres.role_get'](
name, return_password=not refresh_password, **db_args)
if user_attr is not None:
mode = 'update'
# The user is not present, make it!
cret = None
update = {}
if mode == 'update':
user_groups = user_attr.get('groups', [])
if (
createdb is not None
and user_attr['can create databases'] != createdb
):
update['createdb'] = createdb
if (
inherit is not None
and user_attr['inherits privileges'] != inherit
):
update['inherit'] = inherit
if login is not None and user_attr['can login'] != login:
update['login'] = login
if (
createroles is not None
and user_attr['can create roles'] != createroles
):
update['createroles'] = createroles
if (
replication is not None
and user_attr['replication'] != replication
):
update['replication'] = replication
if superuser is not None and user_attr['superuser'] != superuser:
update['superuser'] = superuser
if password is not None and (refresh_password or user_attr['password'] != password):
update['password'] = True
if groups is not None:
lgroups = groups
if isinstance(groups, (six.string_types, six.text_type)):
lgroups = lgroups.split(',')
if isinstance(lgroups, list):
missing_groups = [a for a in lgroups if a not in user_groups]
if missing_groups:
update['groups'] = missing_groups
if mode == 'create' and password is None:
password = default_password
if mode == 'create' or (mode == 'update' and update):
if __opts__['test']:
if update:
ret['changes'][name] = update
ret['result'] = None
ret['comment'] = 'User {0} is set to be {1}d'.format(name, mode)
return ret
cret = __salt__['postgres.user_{0}'.format(mode)](
username=name,
createdb=createdb,
createroles=createroles,
encrypted=encrypted,
superuser=superuser,
login=login,
inherit=inherit,
replication=replication,
rolepassword=password,
groups=groups,
**db_args)
else:
cret = None
if cret:
ret['comment'] = 'The user {0} has been {1}d'.format(name, mode)
if update:
ret['changes'][name] = update
else:
ret['changes'][name] = 'Present'
elif cret is not None:
ret['comment'] = 'Failed to create user {0}'.format(name)
ret['result'] = False
else:
ret['result'] = True
return ret
def absent(name,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named user is absent
name
The username of the user to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if user exists and remove it
if __salt__['postgres.user_exists'](name, **db_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.user_remove'](name, **db_args):
ret['comment'] = 'User {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['result'] = False
ret['comment'] = 'User {0} failed to be removed'.format(name)
return ret
else:
ret['comment'] = 'User {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret
| [
"[email protected]"
] | |
567c4f1b87268b45b3e5955082e71554b4e4551e | e3abb55ba514fb102ce01601ab0e9ebc15f5d26f | /code/l010_await.py | 1c1c6228bf6292b72ebae15c80d040f4c8a0b5a4 | [] | no_license | joshmarshall/coroutine-presentation | 1d8dec7a6c31a0ee5e8875883a326ea801300e93 | a6d07e70bdff286f45785f4127d854ea701a6a08 | refs/heads/master | 2023-09-03T04:23:20.422823 | 2018-01-03T10:19:50 | 2018-01-03T10:19:50 | 64,452,999 | 1 | 0 | null | 2017-11-19T21:17:58 | 2016-07-29T05:29:08 | Python | UTF-8 | Python | false | false | 1,248 | py | import asyncio
class Session(object):
@classmethod
def connect(cls):
return Session()
async def __aenter__(self):
print("Creating session...")
await asyncio.sleep(1)
return self
async def __aexit__(self, exc_typ, exc, tb):
# can also handle exceptions as necessary
await asyncio.sleep(1)
print("Disconnected.")
    def __aiter__(self):
        # On Python 3.7+ __aiter__ must return the async iterator itself
        # (not a coroutine), so this is a plain synchronous method.
        self.records = [Record(), Record()]
        return self
async def __anext__(self):
print("Finding record...")
await asyncio.sleep(1)
if not self.records:
raise StopAsyncIteration()
return self.records.pop(0)
def find(self):
return self
class Record(object):
async def update(self, **kwargs):
await asyncio.sleep(1)
print("Updating record: {0}".format(kwargs))
async def wait():
async with Session.connect() as session:
i = 0
async for record in session.find():
i += 1
await record.update(foo=i)
def main():
loop = asyncio.get_event_loop()
print("Starting...")
loop.run_until_complete(wait())
print("Finishing...")
loop.close()
if __name__ == "__main__":
main()
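# Rough expected console trace (given the sleeps above): Starting...,
# Creating session..., then for each record a Finding record... followed by
# Updating record: {'foo': 1} / {'foo': 2}, one final Finding record... that
# ends the iteration, Disconnected., and Finishing...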
| [
"[email protected]"
] | |
9967bfbb48682fff74e8fa93da453b918a2d908b | 43715a10381ec37c275850c2e4f5302cde18de8c | /rooms/models.py | 8544758b5c7d49ad504a4a43c4f38656f611174b | [] | no_license | dongdong-e/airbnb-clone | 443f290baca4ea5c8f22f6c573383d11de4140f4 | 32c083c4e7f562d968639099d8439f26a666b175 | refs/heads/master | 2023-05-02T22:08:32.232594 | 2019-11-25T12:13:13 | 2019-11-25T12:13:13 | 219,305,006 | 0 | 0 | null | 2023-04-21T20:42:00 | 2019-11-03T13:27:34 | Python | UTF-8 | Python | false | false | 2,842 | py | from django.db import models
from django.urls import reverse
from django_countries.fields import CountryField
from core import models as core_models
class AbstractItem(core_models.TimeStampedModel):
""" Abstract Item """
name = models.CharField(max_length=80)
class Meta:
abstract = True
def __str__(self):
return self.name
class RoomType(AbstractItem):
""" RoomType Model Definition """
class Meta:
verbose_name = "Room Type"
ordering = ["name"]
class Amenity(AbstractItem):
""" Amenity Model Definition """
class Meta:
verbose_name_plural = "Amenities"
class Facility(AbstractItem):
""" Facility Model Definition """
class Meta:
verbose_name_plural = "Facilities"
class HouseRule(AbstractItem):
""" HouseRule Model Definition """
class Meta:
verbose_name = "House Rule"
class Photo(core_models.TimeStampedModel):
""" Photo Model Definition """
caption = models.CharField(max_length=80)
file = models.ImageField(upload_to="room_photos")
room = models.ForeignKey("Room", related_name="photos", on_delete=models.CASCADE)
def __str__(self):
return self.caption
class Room(core_models.TimeStampedModel):
""" Room Model Definition """
name = models.CharField(max_length=140)
description = models.TextField()
country = CountryField()
city = models.CharField(max_length=80)
price = models.IntegerField()
address = models.CharField(max_length=140)
guests = models.IntegerField()
beds = models.IntegerField()
bedrooms = models.IntegerField()
baths = models.IntegerField()
check_in = models.TimeField()
check_out = models.TimeField()
instant_book = models.BooleanField(default=False)
host = models.ForeignKey(
"users.User", related_name="rooms", on_delete=models.CASCADE
)
room_type = models.ForeignKey(
"RoomType", related_name="rooms", on_delete=models.SET_NULL, null=True
)
amenities = models.ManyToManyField("Amenity", related_name="rooms", blank=True)
facilities = models.ManyToManyField("Facility", related_name="rooms", blank=True)
house_rules = models.ManyToManyField("HouseRule", related_name="rooms", blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.city = str.capitalize(self.city)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("rooms:detail", kwargs={"pk": self.pk})
def total_rating(self):
all_reviews = self.reviews.all()
all_ratings = 0
if len(all_reviews) > 0:
for review in all_reviews:
all_ratings += review.rating_average()
return round(all_ratings / len(all_reviews), 2)
return 0
| [
"[email protected]"
] | |
7ab917ac2d5b6dbd613df8ad73eaa04c6fd703b9 | e042a2437aa60fdc966c4bb97d87f27fb6378c9c | /vae-mnist/utils.py | cbc53886b453559793ea1d4b8a743196b76eca8f | [] | no_license | JZDBB/OOC-for-research | a8653f69a01fe9edd024411234ca422e220a437f | 265fbd1732460acbe2a36f4273635485abf0eb0c | refs/heads/master | 2020-07-04T04:08:51.130198 | 2019-08-21T13:00:38 | 2019-08-21T13:00:38 | 202,150,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import numpy as np
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
        j = idx // size[1]  # integer division so the slice indices below stay ints
img[j*h:j*h+h, i*w:i*w+w] = image
return img
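# Minimal usage sketch (hypothetical shapes): tile a batch of 16 grayscale
# 28x28 images into a 4x4 grid.
#
#   batch = np.random.rand(16, 28, 28)
#   grid = merge(batch, (4, 4))  # -> array of shape (112, 112)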
| [
"[email protected]"
] | |
7d167e1feb92203517a6bf08e8597b19369c565e | 42ffa887ca0ac7b54f0473880613865fe523fbfc | /src/viztracer/__init__.py | 38fd0acde24ec07503595c6da251f4e74a45e921 | [
"Apache-2.0"
] | permissive | tianxie1989/viztracer | e61090ac286a5b4ffe4c8f0265fde38bca68837b | 39a6314b2a5a30ede71be96bd5e174b2bdaa2664 | refs/heads/master | 2022-12-11T08:21:25.415858 | 2020-08-21T00:21:00 | 2020-08-21T00:21:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from .viztracer import VizTracer
from .flamegraph import FlameGraph
from .decorator import ignore_function
__version__ = "0.3.0"
__all__ = [
"__version__",
"VizTracer",
"FlameGraph",
"ignore_function"
]
| [
"[email protected]"
] | |
d2a67d571a6ae128e18235f827a76b271bc6e6e8 | cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b | /ecloud/code/src/main/python/manor/streamlet/create_nodes.py | 159486c27b7fd7132e26361dfada9a5c35673aba | [] | no_license | 1026237416/Python | ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14 | ffa8f9ffb8bfec114b0ca46295db05c4213c4c30 | refs/heads/master | 2021-07-05T00:57:00.456886 | 2019-04-26T10:13:46 | 2019-04-26T10:13:46 | 114,510,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | # coding=utf-8
import yaml
from tornado import gen
from manor.screwdriver import stack_util
from manor.screwdriver.vendor_ecloud import list_app_resources
from manor.streamlet import StreamletBase,get_stack_resources
from manor.streamlet import download_path
from manor.util import generals
from manor.util import redis_tool
SUCCESS_FLAG='CREATE_COMPLETE'
CREATING_FLAG='CREATE_IN_PROGRESS'
def get_instance(params,node_id,serial):
return CreateNodes(params,serial,node_id)
class CreateNodes(StreamletBase):
def __init__(self,params,serial,node_id):
super(CreateNodes,self).__init__(node_id,params,serial)
self.result=None
self.created_resources=[]
self.stack_status=''
self.ips=[]
@gen.coroutine
def execute(self):
if not self.executed:
self.executed=True
# todo: check input parameters...
self.log.debug('params:')
self.log.debug(self.params)
data_module={
'name':'create node',
'resources':{},
'group_name':self.get_resource('group_name')
}
self.log.debug('calculate data module ..')
try:
if self.get_resource('group_name')=='':
raise Exception('group name is empty.')
if self.get_resource('max')!='':
_max=int(self.get_resource('max'))
group_name=self.get_resource('group_name')
rs=yield list_app_resources(self.serial)
rs=[_ for _ in rs if _['group_name']==group_name]
if len(rs)>=_max:
raise Exception('manor.create.node.upper.limited')
os_name=yield download_path(self.get_resource('image'))
data_module['resources'][self.get_resource('group_name')]={
"count":self.get_resource('amount'),
"group_name":self.get_resource('group_name'),
"image":self.get_resource('image'),
'flavor':self.get_resource('flavors'),
"memory":self.get_resource('memory'),
"cores":self.get_resource('cores'),
'tenant':self.get_resource('tenant'),
'size':self.get_resource('disk_capacity'),
"os":os_name,
"network":[
{
"network":self.get_resource('network'),
"subnet":self.get_resource('subnet')
}
]
}
self.log.debug(data_module)
self.stack_id=yield stack_util.create_action(data_module,
self.serial)
except Exception as e:
self.log.error(generals.trace())
raise e
@gen.coroutine
def calculate_created_resources(self):
resources=yield get_stack_resources(self.stack_id)
self.log.debug('calculate created:\n %s'%yaml.safe_dump(resources))
self.created_resources=resources
@gen.coroutine
def get_stack_status(self):
future=yield stack_util.get_stack(self.stack_id)
self.stack_status=future.to_dict()['stack_status']
def get_resource(self,key):
if key in self.params:
return self.params[key]
else:
return ''
def __ips_not_in_road_map(self,ips):
return [_ for _ in ips if _ not in self.__get_road_map()]
def __get_road_map(self):
r=redis_tool.get_it()
road_map=r.keys('mapup*')
return [_.split('_$_')[3] for _ in road_map]
def check_finish(self):
"""
        Note: this method runs in a separate thread and is executed once per second.
"""
try:
self.log.debug('create_nodes step. check finish. stack_id %s'%
self.stack_id)
if self.stack_id is None:
return False
if self.stack_status!=CREATING_FLAG:
if self.stack_status==SUCCESS_FLAG:
if len(self.created_resources)==0:
self.calculate_created_resources()
if len(self.ips)==0:
self.ips=[_['ip'] for _ in self.created_resources]
checked=[_ for _ in self.ips if _ in self.__get_road_map()]
self.log.debug('%s - %s'%(self.ips,checked))
if len(self.ips)>0 and self.ips==checked:
return True
else:
return False
else:
self.get_stack_status()
else:
self.log.debug('the stack stack_status is %s'%self.stack_status)
self.get_stack_status()
return False
except:
self.log.error(generals.trace())
raise Exception('error.manor.stream.check.create.node.finish')
| [
"[email protected]"
] | |
8f1829ee69b87b02cc106601fc364e928bd4864f | 6275b8eee6f8f0f69c1f7d1b74a82db22329d560 | /src/train_v4.py | fe583be223d6b303b0b94e9af04e688c97169fb1 | [
"MIT"
] | permissive | khodwe56/kaggle-birdsong-recognition | 081575ea02e663f98292c5e579c14de4bcdb7e22 | 95a902c37355619cf02558968f000038e487db47 | refs/heads/master | 2023-01-01T21:35:20.101880 | 2020-10-27T17:03:06 | 2020-10-27T17:03:06 | 299,716,450 | 0 | 0 | MIT | 2020-09-29T19:21:48 | 2020-09-29T19:21:47 | null | UTF-8 | Python | false | false | 721 | py | from argparse import ArgumentParser, Namespace
from engine.main_engine_v4 import MainEngineV4
import importlib
import torch
import ignite.distributed as idist
torch.backends.cudnn.benchmark = True
def run(local_rank, config):
pe = MainEngineV4(local_rank, config)
pe.train(config.run_params)
def main(hyperparams):
with idist.Parallel(**hyperparams.dist_params) as parallel:
parallel.run(run, hyperparams)
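# Example invocation (hypothetical config module path): the --config value is
# imported with importlib and must expose a Parameters class, e.g.
#   python src/train_v4.py --config configs.experiment_v4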
if __name__ == '__main__':
parser = ArgumentParser(parents=[])
parser.add_argument('--config', type=str)
params = parser.parse_args()
module = importlib.import_module(params.config, package=None)
hyperparams = module.Parameters()
main(hyperparams) | [
"[email protected]"
] | |
f74296653aa5f909d55be6b01db02cd11a8f0142 | 69533190b829ae8d37fe87e6990ecb9cc250bef3 | /old/teach_pendant/switch_map.py | d91d5db1a81cd2eaa23f0f5cc8e4f22691e1cba2 | [] | no_license | chxb1987/idx6dof | a3ebd70d9901845b3a72f611e021caaba8814602 | b6a2a1b79673cdc3d929c469116ff4eaf3f7583d | refs/heads/master | 2020-08-03T21:46:51.620409 | 2017-06-14T20:50:22 | 2017-06-14T20:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | SWITCH_UP=1
SWITCH_DOWN=3
sw_map = (
( 4, 2, SWITCH_UP),
( 12, 2, SWITCH_DOWN),
( 16, 17, SWITCH_UP),
( 17, 16, SWITCH_UP),
( 18, 15, SWITCH_UP),
( 19, 14, SWITCH_UP),
( 20, 13, SWITCH_UP),
( 21, 12, SWITCH_UP),
( 22, 10, SWITCH_UP),
( 23, 11, SWITCH_UP),
( 24, 17, SWITCH_DOWN),
( 25, 16, SWITCH_DOWN),
( 26, 15, SWITCH_DOWN),
( 27, 14, SWITCH_DOWN),
( 28, 13, SWITCH_DOWN),
( 29, 12, SWITCH_DOWN),
( 30, 10, SWITCH_DOWN),
( 31, 11, SWITCH_DOWN),
( 32, 7, SWITCH_UP),
( 33, 6, SWITCH_UP),
( 34, 5, SWITCH_UP),
( 35, 4, SWITCH_UP),
( 36, 3, SWITCH_UP),
( 37, 8, SWITCH_UP),
( 38, 1, SWITCH_UP),
( 39, 9, SWITCH_UP),
( 40, 7, SWITCH_DOWN),
( 41, 6, SWITCH_DOWN),
( 42, 5, SWITCH_DOWN),
( 43, 4, SWITCH_DOWN),
( 44, 3, SWITCH_DOWN),
( 45, 8, SWITCH_DOWN),
( 46, 1, SWITCH_DOWN),
( 47, 9, SWITCH_DOWN),
)
for sw_code, sw_n, sw_pos in sw_map:
if sw_pos == SWITCH_UP:
vn = 'this->swbits_ups'
mn = 'SET_SW_UP'
else:
vn = 'this->swbits_downs'
mn = 'SET_SW_DOWN'
    print("case {sw_code}: {mn}({sw_n}); break; ".format(**locals()))
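# For example, the first map entry (4, 2, SWITCH_UP) emits the C line:
#   case 4: SET_SW_UP(2); break;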
"[email protected]"
] | |
9c5ae5f21eb5f1a36093fe5f764a1835128a01d2 | dc67e70a303f265ee6cb4c1a2d61fe811053fb3d | /beginner/095/C.py | e641e597678f29556c9fceffadc8270b970f8ac8 | [] | no_license | cry999/AtCoder | d39ce22d49dfce805cb7bab9d1ff0dd21825823a | 879d0e43e3fac0aadc4d772dc57374ae72571fe6 | refs/heads/master | 2020-04-23T13:55:00.018156 | 2019-12-11T05:23:03 | 2019-12-11T05:23:03 | 171,214,066 | 0 | 0 | null | 2019-05-13T15:17:02 | 2019-02-18T04:24:01 | Python | UTF-8 | Python | false | false | 623 | py | def half_and_half(
A: int, B: int, C: int, X: int, Y: int) -> int:
"""
    :param A: price of pizza A
    :param B: price of pizza B
    :param C: price of pizza AB
    :param X: number of pizza A needed
    :param Y: number of pizza B needed
"""
min_price = float('inf')
for num_ab in range(max(X, Y)+1):
num_a, num_b = max(0, X-num_ab), max(0, Y-num_ab)
price = num_a*A + num_b*B + 2*num_ab*C
min_price = min(min_price, price)
return min_price
if __name__ == "__main__":
A, B, C, X, Y = map(int, input().split())
ans = half_and_half(A, B, C, X, Y)
print(ans)
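# Worked example (hypothetical input): A=1500, B=2000, C=1600, X=3, Y=2.
# The loop finds the optimum at num_ab=2: four AB-pizzas (covering 2 A and
# 2 B) cost 6400, plus one extra A-pizza for 1500, i.e. 7900 in total.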
| [
"[email protected]"
] | |
db520c55803ce3ffeb97f5b339bc73d74fb711f0 | cb40aad84a35856ce5a8285ea7260f4183b1dd7a | /tests/model/test_properties.py | 686bc3f6503e24b4cfda6093606dd26cd1f7e118 | [
"Apache-2.0",
"MIT"
] | permissive | vyahello/trump-bullet-game | f71f2fe86a92ba89ea82af5cfecab504b13576d0 | 7648f9722471323ddec1aa6b6d7db38166bebc91 | refs/heads/master | 2021-09-08T09:31:49.459350 | 2021-08-29T08:26:14 | 2021-08-29T08:40:40 | 167,864,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,974 | py | from typing import Tuple
import pytest
from app.model.properties import GameProperty, Color, Resolution, Border
from app import PropertyError
_rdba_color: Tuple[int, ...] = (1, 2, 3)
_resolution: Tuple[int, ...] = (10, 20)
_bottom: int = 5
def test_property_coordinates() -> None:
assert len(GameProperty.coordinates()) == 4
def test_calculate_jumper() -> None:
assert GameProperty.calculate_jumper() == 50
def test_color_as_rgba(color: Color) -> None:
assert color.as_rgba() == _rdba_color
def test_resolution_as_sequence(resolution: Resolution) -> None:
assert resolution.as_sequence() == _resolution
def test_resolution_top_height(resolution: Resolution) -> None:
assert resolution.top_height == _resolution[0]
def test_resolution_top_width(resolution: Resolution) -> None:
assert resolution.top_width == _resolution[1]
def test_resolution_bottom(resolution: Resolution) -> None:
assert resolution.bottom == _bottom
def test_border_is_top_left(screen_border: Border) -> None:
assert screen_border.is_top_left(10)
def test_border_is_top_right(screen_border: Border) -> None:
assert screen_border.is_top_right(10, 2)
def test_border_is_top_upper(screen_border: Border) -> None:
assert screen_border.is_top_upper(15)
def test_border_is_top_lower(screen_border: Border) -> None:
assert screen_border.is_top_lower(3, -10)
def test_border_is_not_top_left(screen_border: Border) -> None:
assert not screen_border.is_top_left(1)
def test_border_is_not_top_right(screen_border: Border) -> None:
assert not screen_border.is_top_right(30, 3)
def test_border_is_not_top_upper(screen_border: Border) -> None:
assert not screen_border.is_top_upper(1)
def test_border_is_not_top_lower(screen_border: Border) -> None:
assert not screen_border.is_top_lower(15, 2)
def test_resolution_error() -> None:
with pytest.raises(PropertyError):
Resolution(resolution=(0, 0, 0)).as_sequence()
| [
"[email protected]"
] | |
ba55aa07f86bf85d7f55d854a6d3e64096f4000b | d80ef8c716bcc5ea54e87540dbf0463f15bf44ce | /libmproxy/contrib/wbxml/InvalidDataException.py | 67f8ea93014bc2aaf814f9995cc5861007b63caf | [
"MIT",
"BSD-3-Clause"
] | permissive | YagiGo/YPTN | 5043d22eb131c7164d3fa575f0c4e3d8a963dbf4 | d7692a68ee1bf578536b4c09c566272210fc8b69 | refs/heads/master | 2018-10-16T03:44:18.024169 | 2018-07-24T08:53:57 | 2018-07-24T08:53:57 | 107,633,669 | 4 | 1 | MIT | 2018-06-08T09:04:29 | 2017-10-20T04:55:22 | JavaScript | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/env python
'''
@author: David Shaw, [email protected]
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: InvalidDataException.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class InvalidDataException(Exception):
pass | [
"[email protected]"
] | |
4ed59eebd6c684deb8e7f456b283309a733233df | 0942f23ffad253850099d7b994415ba3ab50d896 | /pyinstaller/PyInstaller/build.py | 44ace10c67ccba3daa0f291034dfae8d36ea8776 | [] | no_license | fabiomdiniz/Frey | d5fa09c67c82201d8f6a6df61e23f24b1e71c923 | 6d1f133b33afb4e810737e1690f89e1faf9ae0ee | refs/heads/master | 2020-05-17T15:48:33.106859 | 2013-11-05T14:25:02 | 2013-11-05T14:25:02 | 2,523,746 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 63,105 | py | #
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Build packages using spec files.
import sys
import os
import shutil
import pprint
import py_compile
import imp
import tempfile
import UserList
import bindepend
from PyInstaller.loader import pyi_archive, pyi_carchive
import PyInstaller.depend.imptracker
import PyInstaller.depend.modules
from PyInstaller import HOMEPATH, CONFIGDIR, PLATFORM
from PyInstaller.compat import is_win, is_unix, is_aix, is_darwin, is_cygwin
import PyInstaller.compat as compat
from PyInstaller.compat import hashlib
from PyInstaller.depend import dylib
from PyInstaller.utils import misc
import PyInstaller.log as logging
if is_win:
from PyInstaller.utils import winmanifest
logger = logging.getLogger(__name__)
STRINGTYPE = type('')
TUPLETYPE = type((None,))
UNCOMPRESSED, COMPRESSED = range(2)
DEFAULT_BUILDPATH = os.path.join('SPECPATH', 'build',
'pyi.TARGET_PLATFORM', 'SPECNAME')
SPEC = None
SPECPATH = None
BUILDPATH = None
WARNFILE = None
NOCONFIRM = None
# Some modules are included if they are detected at build-time or
# if a command-line argument is specified. (e.g. --ascii)
HIDDENIMPORTS = []
rthooks = {}
def _save_data(filename, data):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
outf = open(filename, 'w')
pprint.pprint(data, outf)
outf.close()
def _load_data(filename):
return eval(open(filename, 'rU').read())
def setupUPXFlags():
f = compat.getenv("UPX", "")
if is_win:
# Binaries built with Visual Studio 7.1 require --strip-loadconf
# or they won't compress. Configure.py makes sure that UPX is new
# enough to support --strip-loadconf.
f = "--strip-loadconf " + f
# Do not compress any icon, so that additional icons in the executable
# can still be externally bound
f = "--compress-icons=0 " + f
f = "--best " + f
compat.setenv("UPX", f)
def mtime(fnm):
try:
return os.stat(fnm)[8]
except:
return 0
def absnormpath(apath):
return os.path.abspath(os.path.normpath(apath))
def compile_pycos(toc):
"""Given a TOC or equivalent list of tuples, generates all the required
pyc/pyo files, writing in a local directory if required, and returns the
list of tuples with the updated pathnames.
"""
global BUILDPATH
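    # A TOC entry is a (name, path, typecode) tuple, e.g. a hypothetical
    # ('mypkg.mymod', '/path/to/mypkg/mymod.pyc', 'PYMODULE'); only 'PYMODULE'
    # entries are (re)compiled below.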
# For those modules that need to be rebuilt, use the build directory
# PyInstaller creates during the build process.
basepath = os.path.join(BUILDPATH, "localpycos")
new_toc = []
for (nm, fnm, typ) in toc:
if typ != 'PYMODULE':
new_toc.append((nm, fnm, typ))
continue
# Trim the terminal "c" or "o"
source_fnm = fnm[:-1]
# We need to perform a build ourselves if the source is newer
# than the compiled, or the compiled doesn't exist, or if it
# has been written by a different Python version.
needs_compile = (mtime(source_fnm) > mtime(fnm)
or
open(fnm, 'rb').read()[:4] != imp.get_magic())
if needs_compile:
try:
py_compile.compile(source_fnm, fnm)
logger.debug("compiled %s", source_fnm)
except IOError:
# If we're compiling on a system directory, probably we don't
# have write permissions; thus we compile to a local directory
# and change the TOC entry accordingly.
ext = os.path.splitext(fnm)[1]
if "__init__" not in fnm:
# If it's a normal module, use last part of the qualified
# name as module name and the first as leading path
leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1]
else:
# In case of a __init__ module, use all the qualified name
# as leading path and use "__init__" as the module name
leading, mod_name = nm.split("."), "__init__"
leading = os.path.join(basepath, *leading)
if not os.path.exists(leading):
os.makedirs(leading)
fnm = os.path.join(leading, mod_name + ext)
needs_compile = (mtime(source_fnm) > mtime(fnm)
or
open(fnm, 'rb').read()[:4] != imp.get_magic())
if needs_compile:
py_compile.compile(source_fnm, fnm)
logger.debug("compiled %s", source_fnm)
new_toc.append((nm, fnm, typ))
return new_toc
def addSuffixToExtensions(toc):
"""
Returns a new TOC with proper library suffix for EXTENSION items.
"""
new_toc = TOC()
for inm, fnm, typ in toc:
if typ in ('EXTENSION', 'DEPENDENCY'):
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc
#--- functions for checking guts ---
def _check_guts_eq(attr, old, new, last_build):
"""
rebuild is required if values differ
"""
if old != new:
logger.info("building because %s changed", attr)
return True
return False
def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):
"""
rebuild is required if mtimes of files listed in old toc are newer
    than last_build
if pyc=1, check for .py files, too
"""
for (nm, fnm, typ) in old:
if mtime(fnm) > last_build:
logger.info("building because %s changed", fnm)
return True
elif pyc and mtime(fnm[:-1]) > last_build:
logger.info("building because %s changed", fnm[:-1])
return True
return False
def _check_guts_toc(attr, old, toc, last_build, pyc=0):
"""
    rebuild is required if either the toc content changed or mtimes of
    files listed in old toc are newer than last_build
if pyc=1, check for .py files, too
"""
return (_check_guts_eq(attr, old, toc, last_build)
or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))
def _check_path_overlap(path):
"""
Check that path does not overlap with BUILDPATH or SPECPATH (i.e.
BUILDPATH and SPECPATH may not start with path, which could be
caused by a faulty hand-edited specfile)
Raise SystemExit if there is overlap, return True otherwise
"""
specerr = 0
if BUILDPATH.startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'BUILDPATH (%s)', path, BUILDPATH)
specerr += 1
if SPECPATH.startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'SPECPATH (%s)', path, SPECPATH)
specerr += 1
if specerr:
raise SystemExit('Error: Please edit/recreate the specfile (%s) '
'and set a different output name (e.g. "dist").'
% SPEC)
return True
def _rmtree(path):
"""
Remove directory and all its contents, but only after user confirmation,
or if the -y option is set
"""
if NOCONFIRM:
choice = 'y'
elif sys.stdout.isatty():
choice = raw_input('WARNING: The output directory "%s" and ALL ITS '
'CONTENTS will be REMOVED! Continue? (y/n)' % path)
else:
raise SystemExit('Error: The output directory "%s" is not empty. '
'Please remove all its contents or use the '
'-y option (remove output directory without '
'confirmation).' % path)
if choice.strip().lower() == 'y':
logger.info('Removing dir %s', path)
shutil.rmtree(path)
else:
raise SystemExit('User aborted')
def check_egg(pth):
"""Check if path points to a file inside a python egg file (or to an egg
directly)."""
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
sep = os.path.sep
for i, name in zip(range(0, len(components)), components):
if name.lower().endswith(".egg"):
eggpth = sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
return True
return False
#--
class Target:
invcnum = 0
def __init__(self):
# Get a (per class) unique number to avoid conflicts between
# toc objects
self.invcnum = self.__class__.invcnum
self.__class__.invcnum += 1
self.out = os.path.join(BUILDPATH, 'out%02d-%s.toc' %
(self.invcnum, self.__class__.__name__))
self.outnm = os.path.basename(self.out)
self.dependencies = TOC()
def __postinit__(self):
logger.info("checking %s", self.__class__.__name__)
if self.check_guts(mtime(self.out)):
self.assemble()
GUTS = []
def check_guts(self, last_build):
pass
def get_guts(self, last_build, missing='missing or bad'):
"""
returns None if guts have changed
"""
try:
data = _load_data(self.out)
except:
logger.info("building because %s %s", os.path.basename(self.out), missing)
return None
if len(data) != len(self.GUTS):
logger.info("building because %s is bad", self.outnm)
return None
for i, (attr, func) in enumerate(self.GUTS):
if func is None:
# no check for this value
continue
if func(attr, data[i], getattr(self, attr), last_build):
return None
return data
class Analysis(Target):
_old_scripts = set((
absnormpath(os.path.join(HOMEPATH, "support", "_mountzlib.py")),
absnormpath(os.path.join(CONFIGDIR, "support", "useUnicode.py")),
absnormpath(os.path.join(CONFIGDIR, "support", "useTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useUnicode.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "unpackTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "removeTK.py")),
))
def __init__(self, scripts=None, pathex=None, hiddenimports=None,
hookspath=None, excludes=None):
Target.__init__(self)
# Include initialization Python code in PyInstaller analysis.
_init_code_path = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
self.inputs = [
os.path.join(HOMEPATH, "support", "_pyi_bootstrap.py"),
os.path.join(_init_code_path, 'pyi_archive.py'),
os.path.join(_init_code_path, 'pyi_carchive.py'),
os.path.join(_init_code_path, 'pyi_iu.py'),
]
for script in scripts:
if absnormpath(script) in self._old_scripts:
logger.warn('Ignoring obsolete auto-added script %s', script)
continue
if not os.path.exists(script):
raise ValueError("script '%s' not found" % script)
self.inputs.append(script)
self.pathex = []
if pathex:
self.pathex = [absnormpath(path) for path in pathex]
self.hiddenimports = hiddenimports or []
# Include modules detected at build time. Like 'codecs' and encodings.
self.hiddenimports.extend(HIDDENIMPORTS)
self.hookspath = hookspath
self.excludes = excludes
self.scripts = TOC()
self.pure = TOC()
self.binaries = TOC()
self.zipfiles = TOC()
self.datas = TOC()
self.dependencies = TOC()
self.__postinit__()
GUTS = (('inputs', _check_guts_eq),
('pathex', _check_guts_eq),
('hookspath', _check_guts_eq),
('excludes', _check_guts_eq),
('scripts', _check_guts_toc_mtime),
('pure', lambda *args: apply(_check_guts_toc_mtime,
args, {'pyc': 1})),
('binaries', _check_guts_toc_mtime),
('zipfiles', _check_guts_toc_mtime),
('datas', _check_guts_toc_mtime),
('hiddenimports', _check_guts_eq),
)
def check_guts(self, last_build):
if last_build == 0:
logger.info("building %s because %s non existent", self.__class__.__name__, self.outnm)
return True
for fnm in self.inputs:
if mtime(fnm) > last_build:
logger.info("building because %s changed", fnm)
return True
data = Target.get_guts(self, last_build)
if not data:
return True
scripts, pure, binaries, zipfiles, datas, hiddenimports = data[-6:]
self.scripts = TOC(scripts)
self.pure = TOC(pure)
self.binaries = TOC(binaries)
self.zipfiles = TOC(zipfiles)
self.datas = TOC(datas)
self.hiddenimports = hiddenimports
return False
def assemble(self):
logger.info("running Analysis %s", os.path.basename(self.out))
# Reset seen variable to correctly discover dependencies
# if there are multiple Analysis in a single specfile.
bindepend.seen = {}
python = sys.executable
if not is_win:
while os.path.islink(python):
python = os.path.join(os.path.dirname(python), os.readlink(python))
depmanifest = None
else:
depmanifest = winmanifest.Manifest(type_="win32", name=specnm,
processorArchitecture=winmanifest.processor_architecture(),
version=(1, 0, 0, 0))
depmanifest.filename = os.path.join(BUILDPATH,
specnm + ".exe.manifest")
binaries = [] # binaries to bundle
# Always add Python's dependencies first
        # This ensures that its assembly dependencies under Windows get pulled in
# first, so that .pyd files analyzed later which may not have their own
# manifest and may depend on DLLs which are part of an assembly
# referenced by Python's manifest, don't cause 'lib not found' messages
binaries.extend(bindepend.Dependencies([('', python, '')],
manifest=depmanifest)[1:])
###################################################
# Scan inputs and prepare:
dirs = {} # input directories
pynms = [] # python filenames with no extension
for script in self.inputs:
if not os.path.exists(script):
raise SystemExit("Error: Analysis: script %s not found!" % script)
d, base = os.path.split(script)
if not d:
d = os.getcwd()
d = absnormpath(d)
pynm, ext = os.path.splitext(base)
dirs[d] = 1
pynms.append(pynm)
###################################################
# Initialize importTracker and analyze scripts
importTracker = PyInstaller.depend.imptracker.ImportTracker(
dirs.keys() + self.pathex, self.hookspath, self.excludes)
PyInstaller.__pathex__ = self.pathex[:]
scripts = [] # will contain scripts to bundle
for i, script in enumerate(self.inputs):
logger.info("Analyzing %s", script)
importTracker.analyze_script(script)
scripts.append((pynms[i], script, 'PYSOURCE'))
PyInstaller.__pathex__ = []
# analyze the script's hidden imports
for modnm in self.hiddenimports:
if modnm in importTracker.modules:
logger.info("Hidden import %r has been found otherwise", modnm)
continue
logger.info("Analyzing hidden import %r", modnm)
importTracker.analyze_one(modnm)
if not modnm in importTracker.modules:
logger.error("Hidden import %r not found", modnm)
###################################################
        # Fills pure, binaries and rthooks lists to TOC
pure = [] # pure python modules
zipfiles = [] # zipfiles to bundle
datas = [] # datafiles to bundle
rthooks = [] # rthooks if needed
# Find rthooks.
logger.info("Looking for run-time hooks")
for modnm, mod in importTracker.modules.items():
rthooks.extend(_findRTHook(modnm))
        # Analyze rthooks. Runtime hooks have to be analyzed as well.
# Otherwise some dependencies could be missing.
# Data structure in format:
# ('rt_hook_mod_name', '/rt/hook/file/name.py', 'PYSOURCE')
for hook_mod, hook_file, mod_type in rthooks:
logger.info("Analyzing rthook %s", hook_file)
importTracker.analyze_script(hook_file)
for modnm, mod in importTracker.modules.items():
# FIXME: why can we have a mod == None here?
if mod is None:
continue
datas.extend(mod.datas)
if isinstance(mod, PyInstaller.depend.modules.BuiltinModule):
pass
elif isinstance(mod, PyInstaller.depend.modules.ExtensionModule):
binaries.append((mod.__name__, mod.__file__, 'EXTENSION'))
# allows hooks to specify additional dependency
# on other shared libraries loaded at runtime (by dlopen)
binaries.extend(mod.binaries)
elif isinstance(mod, (PyInstaller.depend.modules.PkgInZipModule, PyInstaller.depend.modules.PyInZipModule)):
zipfiles.append(("eggs/" + os.path.basename(str(mod.owner)),
str(mod.owner), 'ZIPFILE'))
else:
# mf.PyModule instances expose a list of binary
# dependencies, most probably shared libraries accessed
# via ctypes. Add them to the overall required binaries.
binaries.extend(mod.binaries)
if modnm != '__main__':
pure.append((modnm, mod.__file__, 'PYMODULE'))
# Add remaining binary dependencies
binaries.extend(bindepend.Dependencies(binaries,
manifest=depmanifest))
if is_win:
depmanifest.writeprettyxml()
self._check_python_library(binaries)
if zipfiles:
scripts.insert(-1, ("_pyi_egg_install.py", os.path.join(HOMEPATH, "support/_pyi_egg_install.py"), 'PYSOURCE'))
# Add realtime hooks just before the last script (which is
# the entrypoint of the application).
scripts[-1:-1] = rthooks
self.scripts = TOC(scripts)
self.pure = TOC(pure)
self.binaries = TOC(binaries)
self.zipfiles = TOC(zipfiles)
self.datas = TOC(datas)
try: # read .toc
oldstuff = _load_data(self.out)
except:
oldstuff = None
self.pure = TOC(compile_pycos(self.pure))
newstuff = tuple([getattr(self, g[0]) for g in self.GUTS])
if oldstuff != newstuff:
_save_data(self.out, newstuff)
wf = open(WARNFILE, 'w')
for ln in importTracker.getwarnings():
wf.write(ln + '\n')
wf.close()
logger.info("Warnings written to %s", WARNFILE)
return 1
logger.info("%s no change!", self.out)
return 0
def _check_python_library(self, binaries):
"""
Verify presence of the Python dynamic library. If missing
from the binaries try to find the Python library. Set
the library name for the bootloader.
        Some linux distributions (e.g. debian-based) statically link the
        Python executable against libpython, so bindepend doesn't include
        it in its output.
        Darwin custom builds could also have non-framework style libraries,
        so this method checks for that variant as well.
"""
pyver = sys.version_info[:2]
if is_win:
names = ('python%d%d.dll' % pyver,)
elif is_cygwin:
names = ('libpython%d%d.dll' % pyver,)
elif is_darwin:
names = ('Python', '.Python', 'libpython%d.%d.dylib' % pyver)
elif is_aix:
# Shared libs on AIX are archives with shared object members, thus the ".a" suffix.
names = ('libpython%d.%d.a' % pyver,)
elif is_unix:
# Other *nix platforms.
names = ('libpython%d.%d.so.1.0' % pyver,)
else:
raise SystemExit('Your platform is not yet supported.')
for (nm, fnm, typ) in binaries:
for name in names:
if typ == 'BINARY' and fnm.endswith(name):
# Python library found.
# FIXME Find a different way how to pass python libname to CArchive.
os.environ['PYI_PYTHON_LIBRARY_NAME'] = name
                    return  # Stop function.
# Resume search using the first item in names.
name = names[0]
logger.info('Looking for Python library %s', name)
if is_unix:
lib = bindepend.findLibrary(name)
if lib is None:
raise IOError("Python library not found!")
elif is_darwin:
# On MacPython, Analysis.assemble is able to find the libpython with
# no additional help, asking for sys.executable dependencies.
# However, this fails on system python, because the shared library
# is not listed as a dependency of the binary (most probably it's
# opened at runtime using some dlopen trickery).
# This happens on Mac OS X when Python is compiled as Framework.
# Python compiled as Framework contains same values in sys.prefix
# and exec_prefix. That's why we can use just sys.prefix.
# In virtualenv PyInstaller is not able to find Python library.
# We need special care for this case.
if compat.is_virtualenv:
py_prefix = sys.real_prefix
else:
py_prefix = sys.prefix
logger.info('Looking for Python library in %s', py_prefix)
lib = os.path.join(py_prefix, name)
if not os.path.exists(lib):
raise IOError("Python library not found!")
# Python library found.
# FIXME Find a different way how to pass python libname to CArchive.
os.environ['PYI_PYTHON_LIBRARY_NAME'] = name
# Include Python library as binary dependency.
binaries.append((os.path.basename(lib), lib, 'BINARY'))
def _findRTHook(modnm):
rslt = []
for script in rthooks.get(modnm) or []:
nm = os.path.basename(script)
nm = os.path.splitext(nm)[0]
if os.path.isabs(script):
path = script
else:
path = os.path.join(HOMEPATH, script)
rslt.append((nm, path, 'PYSOURCE'))
return rslt
class PYZ(Target):
typ = 'PYZ'
def __init__(self, toc, name=None, level=9, crypt=None):
Target.__init__(self)
self.toc = toc
self.name = name
if name is None:
self.name = self.out[:-3] + 'pyz'
# Level of zlib compression.
self.level = level
if config['useCrypt'] and crypt is not None:
self.crypt = pyi_archive.Keyfile(crypt).key
else:
self.crypt = None
self.dependencies = compile_pycos(config['PYZ_dependencies'])
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('level', _check_guts_eq),
('crypt', _check_guts_eq),
('toc', _check_guts_toc), # todo: pyc=1
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s is missing",
self.outnm, os.path.basename(self.name))
return True
data = Target.get_guts(self, last_build)
if not data:
return True
return False
def assemble(self):
logger.info("building PYZ %s", os.path.basename(self.out))
pyz = pyi_archive.ZlibArchive(level=self.level, crypt=self.crypt)
toc = self.toc - config['PYZ_dependencies']
pyz.build(self.name, toc)
_save_data(self.out, (self.name, self.level, self.crypt, self.toc))
return 1
def cacheDigest(fnm):
data = open(fnm, "rb").read()
digest = hashlib.md5(data).digest()
return digest
def checkCache(fnm, strip=0, upx=0, dist_nm=None):
"""
Cache prevents preprocessing binary files again and again.
'dist_nm' Filename relative to dist directory. We need it on Mac
to determine level of paths for @loader_path like
'@loader_path/../../' for qt4 plugins.
"""
    # On darwin a cache is required anyway to keep the libraries
    # with relative install names. Caching on darwin does not work
    # since we need to modify binary headers to use relative paths
    # to dll dependencies starting with '@loader_path'.
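    # For instance (hypothetical call), checkCache('/usr/lib/libfoo.so', upx=1)
    # copies the library into CONFIGDIR/bincache01_py<ver>/, runs UPX on that
    # copy and returns the cached path; later calls reuse it while the digest
    # still matches.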
if ((not strip and not upx and not is_darwin and not is_win)
or fnm.lower().endswith(".manifest")):
return fnm
if strip:
strip = 1
else:
strip = 0
if upx:
upx = 1
else:
upx = 0
# Load cache index
# Make cachedir per Python major/minor version.
# This allows parallel building of executables with different
# Python versions as one user.
pyver = ('py%d%s') % (sys.version_info[0], sys.version_info[1])
cachedir = os.path.join(CONFIGDIR, 'bincache%d%d_%s' % (strip, upx, pyver))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
cacheindexfn = os.path.join(cachedir, "index.dat")
if os.path.exists(cacheindexfn):
cache_index = _load_data(cacheindexfn)
else:
cache_index = {}
# Verify if the file we're looking for is present in the cache.
basenm = os.path.normcase(os.path.basename(fnm))
digest = cacheDigest(fnm)
cachedfile = os.path.join(cachedir, basenm)
cmd = None
if basenm in cache_index:
if digest != cache_index[basenm]:
os.remove(cachedfile)
else:
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
if upx:
if strip:
fnm = checkCache(fnm, 1, 0)
bestopt = "--best"
# FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)
# A better configure-time check is due.
if config["hasUPX"] >= (3,) and os.name == "nt":
bestopt = "--lzma"
upx_executable = "upx"
if config.get('upx_dir'):
upx_executable = os.path.join(config['upx_dir'], upx_executable)
cmd = [upx_executable, bestopt, "-q", cachedfile]
else:
if strip:
# -S = strip only debug symbols.
# The default strip behaviour breaks some shared libraries
# under Mac OSX
cmd = ["strip", "-S", cachedfile]
shutil.copy2(fnm, cachedfile)
os.chmod(cachedfile, 0755)
if pyasm and fnm.lower().endswith(".pyd"):
# If python.exe has dependent assemblies, check for embedded manifest
# of cached pyd file because we may need to 'fix it' for pyinstaller
try:
res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))
except winresource.pywintypes.error, e:
if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
# Not a win32 PE file
pass
else:
logger.error(os.path.abspath(cachedfile))
raise
else:
if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):
for name in res[winmanifest.RT_MANIFEST]:
for language in res[winmanifest.RT_MANIFEST][name]:
try:
manifest = winmanifest.Manifest()
manifest.filename = ":".join([cachedfile,
str(winmanifest.RT_MANIFEST),
str(name),
str(language)])
manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language],
False)
except Exception, exc:
logger.error("Cannot parse manifest resource %s, "
"%s from", name, language)
logger.error(cachedfile)
logger.exception(exc)
else:
# Fix the embedded manifest (if any):
# Extension modules built with Python 2.6.5 have
# an empty <dependency> element, we need to add
# dependentAssemblies from python.exe for
# pyinstaller
olen = len(manifest.dependentAssemblies)
_depNames = set([dep.name for dep in
manifest.dependentAssemblies])
for pydep in pyasm:
if not pydep.name in _depNames:
logger.info("Adding %r to dependent "
"assemblies of %r",
pydep.name, cachedfile)
manifest.dependentAssemblies.append(pydep)
                                    # record the full dependency name in the seen set
                                    _depNames.add(pydep.name)
if len(manifest.dependentAssemblies) > olen:
try:
manifest.update_resources(os.path.abspath(cachedfile),
[name],
[language])
except Exception, e:
logger.error(os.path.abspath(cachedfile))
raise
if cmd:
try:
compat.exec_command(*cmd)
except OSError, e:
raise SystemExit("Execution failed: %s" % e)
# update cache index
cache_index[basenm] = digest
_save_data(cacheindexfn, cache_index)
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
UNCOMPRESSED, COMPRESSED, ENCRYPTED = range(3)
class PKG(Target):
typ = 'PKG'
xformdict = {'PYMODULE': 'm',
'PYSOURCE': 's',
'EXTENSION': 'b',
'PYZ': 'z',
'PKG': 'a',
'DATA': 'x',
'BINARY': 'b',
'ZIPFILE': 'Z',
'EXECUTABLE': 'b',
'DEPENDENCY': 'd'}
def __init__(self, toc, name=None, cdict=None, exclude_binaries=0,
strip_binaries=0, upx_binaries=0, crypt=0):
Target.__init__(self)
self.toc = toc
self.cdict = cdict
self.name = name
self.exclude_binaries = exclude_binaries
self.strip_binaries = strip_binaries
self.upx_binaries = upx_binaries
self.crypt = crypt
if name is None:
self.name = self.out[:-3] + 'pkg'
if self.cdict is None:
self.cdict = {'EXTENSION': COMPRESSED,
'DATA': COMPRESSED,
'BINARY': COMPRESSED,
'EXECUTABLE': COMPRESSED,
'PYSOURCE': COMPRESSED,
'PYMODULE': COMPRESSED}
if self.crypt:
self.cdict['PYSOURCE'] = ENCRYPTED
self.cdict['PYMODULE'] = ENCRYPTED
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('cdict', _check_guts_eq),
('toc', _check_guts_toc_mtime),
('exclude_binaries', _check_guts_eq),
('strip_binaries', _check_guts_eq),
('upx_binaries', _check_guts_eq),
('crypt', _check_guts_eq),
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s is missing",
self.outnm, os.path.basename(self.name))
return 1
data = Target.get_guts(self, last_build)
if not data:
return True
# todo: toc equal
return False
def assemble(self):
logger.info("building PKG %s", os.path.basename(self.name))
trash = []
mytoc = []
seen = {}
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
if not os.path.isfile(fnm) and check_egg(fnm):
# file is contained within python egg, it is added with the egg
continue
if typ in ('BINARY', 'EXTENSION', 'DEPENDENCY'):
if self.exclude_binaries and typ != 'DEPENDENCY':
self.dependencies.append((inm, fnm, typ))
else:
fnm = checkCache(fnm, self.strip_binaries,
self.upx_binaries and (is_win or is_cygwin)
and config['hasUPX'], dist_nm=inm)
                    # Avoid importing the same binary extension twice. This might
                    # happen if they come from different sources (e.g. once via
                    # binary dependency analysis, and once from a direct import).
if typ == 'BINARY' and fnm in seen:
continue
seen[fnm] = 1
mytoc.append((inm, fnm, self.cdict.get(typ, 0),
self.xformdict.get(typ, 'b')))
elif typ == 'OPTION':
mytoc.append((inm, '', 0, 'o'))
else:
mytoc.append((inm, fnm, self.cdict.get(typ, 0), self.xformdict.get(typ, 'b')))
        # The bootloader has to know the name of the Python library.
        # FIXME Find a different way to pass the python libname to CArchive.
archive = pyi_carchive.CArchive(
pylib_name=os.environ['PYI_PYTHON_LIBRARY_NAME'])
archive.build(self.name, mytoc)
_save_data(self.out,
(self.name, self.cdict, self.toc, self.exclude_binaries,
self.strip_binaries, self.upx_binaries, self.crypt))
for item in trash:
os.remove(item)
return 1
class EXE(Target):
typ = 'EXECUTABLE'
exclude_binaries = 0
append_pkg = 1
def __init__(self, *args, **kws):
Target.__init__(self)
self.console = kws.get('console', 1)
self.debug = kws.get('debug', 0)
self.name = kws.get('name', None)
self.icon = kws.get('icon', None)
self.versrsrc = kws.get('version', None)
self.manifest = kws.get('manifest', None)
self.resources = kws.get('resources', [])
self.strip = kws.get('strip', None)
self.upx = kws.get('upx', None)
self.crypt = kws.get('crypt', 0)
self.exclude_binaries = kws.get('exclude_binaries', 0)
self.append_pkg = kws.get('append_pkg', self.append_pkg)
if self.name is None:
self.name = self.out[:-3] + 'exe'
if not os.path.isabs(self.name):
self.name = os.path.join(SPECPATH, self.name)
if is_win or is_cygwin:
self.pkgname = self.name[:-3] + 'pkg'
else:
self.pkgname = self.name + '.pkg'
self.toc = TOC()
for arg in args:
if isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, Target):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
self.toc.extend(arg.dependencies)
else:
self.toc.extend(arg)
if is_win:
filename = os.path.join(BUILDPATH, specnm + ".exe.manifest")
self.manifest = winmanifest.create_manifest(filename, self.manifest,
self.console)
self.toc.append((os.path.basename(self.name) + ".manifest", filename,
'BINARY'))
self.pkg = PKG(self.toc, cdict=kws.get('cdict', None),
exclude_binaries=self.exclude_binaries,
strip_binaries=self.strip, upx_binaries=self.upx,
crypt=self.crypt)
self.dependencies = self.pkg.dependencies
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('console', _check_guts_eq),
('debug', _check_guts_eq),
('icon', _check_guts_eq),
('versrsrc', _check_guts_eq),
('resources', _check_guts_eq),
('strip', _check_guts_eq),
('upx', _check_guts_eq),
('crypt', _check_guts_eq),
            ('mtm', None,), # checked below
)
def check_guts(self, last_build):
if not os.path.exists(self.name):
logger.info("rebuilding %s because %s missing",
self.outnm, os.path.basename(self.name))
return 1
if not self.append_pkg and not os.path.exists(self.pkgname):
logger.info("rebuilding because %s missing",
os.path.basename(self.pkgname))
return 1
data = Target.get_guts(self, last_build)
if not data:
return True
icon, versrsrc, resources = data[3:6]
if (icon or versrsrc or resources) and not config['hasRsrcUpdate']:
# todo: really ignore :-)
logger.info("ignoring icon, version, manifest and resources = platform not capable")
mtm = data[-1]
crypt = data[-2]
if crypt != self.crypt:
logger.info("rebuilding %s because crypt option changed", self.outnm)
return 1
if mtm != mtime(self.name):
logger.info("rebuilding %s because mtimes don't match", self.outnm)
return True
if mtm < mtime(self.pkg.out):
logger.info("rebuilding %s because pkg is more recent", self.outnm)
return True
return False
def _bootloader_file(self, exe):
if not self.console:
exe = exe + 'w'
if self.debug:
exe = exe + '_d'
return os.path.join("support", "loader", PLATFORM, exe)
def assemble(self):
logger.info("building EXE from %s", os.path.basename(self.out))
trash = []
if not os.path.exists(os.path.dirname(self.name)):
os.makedirs(os.path.dirname(self.name))
outf = open(self.name, 'wb')
exe = self._bootloader_file('run')
exe = os.path.join(HOMEPATH, exe)
if is_win or is_cygwin:
exe = exe + '.exe'
if config['hasRsrcUpdate'] and (self.icon or self.versrsrc or
self.resources):
tmpnm = tempfile.mktemp()
shutil.copy2(exe, tmpnm)
os.chmod(tmpnm, 0755)
if self.icon:
icon.CopyIcons(tmpnm, self.icon)
if self.versrsrc:
versioninfo.SetVersion(tmpnm, self.versrsrc)
for res in self.resources:
res = res.split(",")
for i in range(1, len(res)):
try:
res[i] = int(res[i])
except ValueError:
pass
resfile = res[0]
restype = resname = reslang = None
if len(res) > 1:
restype = res[1]
if len(res) > 2:
resname = res[2]
if len(res) > 3:
reslang = res[3]
try:
winresource.UpdateResourcesFromResFile(tmpnm, resfile,
[restype or "*"],
[resname or "*"],
[reslang or "*"])
except winresource.pywintypes.error, exc:
if exc.args[0] != winresource.ERROR_BAD_EXE_FORMAT:
logger.exception(exc)
continue
if not restype or not resname:
logger.error("resource type and/or name not specified")
continue
if "*" in (restype, resname):
logger.error("no wildcards allowed for resource type "
"and name when source file does not "
"contain resources")
continue
try:
winresource.UpdateResourcesFromDataFile(tmpnm,
resfile,
restype,
[resname],
[reslang or 0])
except winresource.pywintypes.error, exc:
logger.exception(exc)
trash.append(tmpnm)
exe = tmpnm
exe = checkCache(exe, self.strip, self.upx and config['hasUPX'])
self.copy(exe, outf)
if self.append_pkg:
logger.info("Appending archive to EXE %s", self.name)
self.copy(self.pkg.name, outf)
else:
logger.info("Copying archive to %s", self.pkgname)
shutil.copy2(self.pkg.name, self.pkgname)
outf.close()
os.chmod(self.name, 0755)
guts = (self.name, self.console, self.debug, self.icon,
self.versrsrc, self.resources, self.strip, self.upx,
self.crypt, mtime(self.name))
assert len(guts) == len(self.GUTS)
_save_data(self.out, guts)
for item in trash:
os.remove(item)
return 1
def copy(self, fnm, outf):
inf = open(fnm, 'rb')
while 1:
data = inf.read(64 * 1024)
if not data:
break
outf.write(data)
class DLL(EXE):
def assemble(self):
logger.info("building DLL %s", os.path.basename(self.out))
outf = open(self.name, 'wb')
dll = self._bootloader_file('inprocsrvr')
dll = os.path.join(HOMEPATH, dll) + '.dll'
self.copy(dll, outf)
self.copy(self.pkg.name, outf)
outf.close()
os.chmod(self.name, 0755)
_save_data(self.out,
(self.name, self.console, self.debug, self.icon,
self.versrsrc, self.manifest, self.resources, self.strip, self.upx, mtime(self.name)))
return 1
class COLLECT(Target):
def __init__(self, *args, **kws):
Target.__init__(self)
self.name = kws.get('name', None)
if self.name is None:
self.name = 'dist_' + self.out[:-4]
self.strip_binaries = kws.get('strip', 0)
self.upx_binaries = kws.get('upx', 0)
if not os.path.isabs(self.name):
self.name = os.path.join(SPECPATH, self.name)
self.toc = TOC()
for arg in args:
if isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, Target):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
if isinstance(arg, EXE):
for tocnm, fnm, typ in arg.toc:
if tocnm == os.path.basename(arg.name) + ".manifest":
self.toc.append((tocnm, fnm, typ))
if not arg.append_pkg:
self.toc.append((os.path.basename(arg.pkgname), arg.pkgname, 'PKG'))
self.toc.extend(arg.dependencies)
else:
self.toc.extend(arg)
self.__postinit__()
GUTS = (('name', _check_guts_eq),
('strip_binaries', _check_guts_eq),
('upx_binaries', _check_guts_eq),
('toc', _check_guts_eq), # additional check below
)
def check_guts(self, last_build):
# COLLECT always needs to be executed, since it will clean the output
# directory anyway to make sure there is no existing cruft accumulating
return 1
def assemble(self):
if _check_path_overlap(self.name) and os.path.isdir(self.name):
_rmtree(self.name)
logger.info("building COLLECT %s", os.path.basename(self.out))
os.makedirs(self.name)
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
if not os.path.isfile(fnm) and check_egg(fnm):
# file is contained within python egg, it is added with the egg
continue
tofnm = os.path.join(self.name, inm)
todir = os.path.dirname(tofnm)
if not os.path.exists(todir):
os.makedirs(todir)
if typ in ('EXTENSION', 'BINARY'):
fnm = checkCache(fnm, self.strip_binaries,
self.upx_binaries and (is_win or is_cygwin)
and config['hasUPX'], dist_nm=inm)
if typ != 'DEPENDENCY':
shutil.copy2(fnm, tofnm)
if typ in ('EXTENSION', 'BINARY'):
os.chmod(tofnm, 0755)
_save_data(self.out,
(self.name, self.strip_binaries, self.upx_binaries, self.toc))
return 1
class BUNDLE(Target):
def __init__(self, *args, **kws):
        # BUNDLE only makes sense under Mac OS X; it's a no-op on other platforms.
if not is_darwin:
return
# icns icon for app bundle.
self.icon = kws.get('icon', os.path.join(os.path.dirname(__file__),
'..', 'source', 'images', 'icon-windowed.icns'))
Target.__init__(self)
self.name = kws.get('name', None)
if self.name is not None:
self.appname = os.path.splitext(os.path.basename(self.name))[0]
self.version = kws.get("version", "0.0.0")
self.toc = TOC()
for arg in args:
if isinstance(arg, EXE):
self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
self.toc.extend(arg.dependencies)
elif isinstance(arg, TOC):
self.toc.extend(arg)
elif isinstance(arg, COLLECT):
self.toc.extend(arg.toc)
else:
logger.info("unsupported entry %s", arg.__class__.__name__)
# Now, find values for app filepath (name), app name (appname), and name
# of the actual executable (exename) from the first EXECUTABLE item in
# toc, which might have come from a COLLECT too (not from an EXE).
for inm, name, typ in self.toc:
if typ == "EXECUTABLE":
self.exename = name
if self.name is None:
self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
self.name = os.path.join(SPECPATH, self.appname + ".app")
else:
self.name = os.path.join(SPECPATH, self.name)
break
self.__postinit__()
GUTS = (('toc', _check_guts_eq), # additional check below
)
def check_guts(self, last_build):
# BUNDLE always needs to be executed, since it will clean the output
# directory anyway to make sure there is no existing cruft accumulating
return 1
def assemble(self):
if _check_path_overlap(self.name) and os.path.isdir(self.name):
_rmtree(self.name)
logger.info("building BUNDLE %s", os.path.basename(self.out))
# Create a minimal Mac bundle structure
os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
os.makedirs(os.path.join(self.name, "Contents", "Resources"))
os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))
# Copy icns icon to Resources directory.
if os.path.exists(self.icon):
shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
else:
logger.warn("icon not found %s" % self.icon)
# Key/values for a minimal Info.plist file
info_plist_dict = {"CFBundleDisplayName": self.appname,
"CFBundleName": self.appname,
# Fix for #156 - 'MacOS' must be in the name - not sure why
"CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
"CFBundleIconFile": os.path.basename(self.icon),
"CFBundleInfoDictionaryVersion": "6.0",
"CFBundlePackageType": "APPL",
"CFBundleShortVersionString": self.version,
# Setting this to 1 will cause Mac OS X *not* to show
# a dock icon for the PyInstaller process which
# decompresses the real executable's contents. As a
# side effect, the main application doesn't get one
# as well, but at startup time the loader will take
# care of transforming the process type.
"LSBackgroundOnly": "1",
}
info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>"""
for k, v in info_plist_dict.items():
info_plist += "<key>%s</key>\n<string>%s</string>\n" % (k, v)
info_plist += """</dict>
</plist>"""
f = open(os.path.join(self.name, "Contents", "Info.plist"), "w")
f.write(info_plist)
f.close()
toc = addSuffixToExtensions(self.toc)
for inm, fnm, typ in toc:
            # Copy files from the cache. This ensures the copied files use relative
            # paths to their dynamic library dependencies (@executable_path).
if typ in ('EXTENSION', 'BINARY'):
fnm = checkCache(fnm, dist_nm=inm)
tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
todir = os.path.dirname(tofnm)
if not os.path.exists(todir):
os.makedirs(todir)
shutil.copy2(fnm, tofnm)
        ## For some hooks, copy resources to the ./Contents/Resources dir.
        # PyQt4 hook: on Mac, Qt requires the 'qt_menu.nib' resource.
        # It is copied from the dist directory.
qt_menu_dir = os.path.join(self.name, 'Contents', 'MacOS', 'qt_menu.nib')
qt_menu_dest = os.path.join(self.name, 'Contents', 'Resources', 'qt_menu.nib')
if os.path.exists(qt_menu_dir):
shutil.copytree(qt_menu_dir, qt_menu_dest)
return 1
class TOC(UserList.UserList):
def __init__(self, initlist=None):
UserList.UserList.__init__(self)
self.fltr = {}
if initlist:
for tpl in initlist:
self.append(tpl)
def append(self, tpl):
try:
fn = tpl[0]
if tpl[2] == "BINARY":
                # Normalize the case for binary files only (to avoid duplicates
                # for different cases under Windows). We can't do that for
                # Python files because the import semantics (even at runtime)
                # depend on the case.
fn = os.path.normcase(fn)
if not self.fltr.get(fn):
self.data.append(tpl)
self.fltr[fn] = 1
except TypeError:
logger.info("TOC found a %s, not a tuple", tpl)
raise
def insert(self, pos, tpl):
fn = tpl[0]
if tpl[2] == "BINARY":
fn = os.path.normcase(fn)
if not self.fltr.get(fn):
self.data.insert(pos, tpl)
self.fltr[fn] = 1
def __add__(self, other):
rslt = TOC(self.data)
rslt.extend(other)
return rslt
def __radd__(self, other):
rslt = TOC(other)
rslt.extend(self.data)
return rslt
def extend(self, other):
for tpl in other:
self.append(tpl)
def __sub__(self, other):
fd = self.fltr.copy()
# remove from fd if it's in other
for tpl in other:
if fd.get(tpl[0], 0):
del fd[tpl[0]]
rslt = TOC()
# return only those things still in fd (preserve order)
for tpl in self.data:
if fd.get(tpl[0], 0):
rslt.append(tpl)
return rslt
def __rsub__(self, other):
rslt = TOC(other)
return rslt.__sub__(self)
def intersect(self, other):
rslt = TOC()
for tpl in other:
if self.fltr.get(tpl[0], 0):
rslt.append(tpl)
return rslt
class Tree(Target, TOC):
def __init__(self, root=None, prefix=None, excludes=None):
Target.__init__(self)
TOC.__init__(self)
self.root = root
self.prefix = prefix
self.excludes = excludes
if excludes is None:
self.excludes = []
self.__postinit__()
GUTS = (('root', _check_guts_eq),
('prefix', _check_guts_eq),
('excludes', _check_guts_eq),
('toc', None),
)
def check_guts(self, last_build):
data = Target.get_guts(self, last_build)
if not data:
return True
stack = [data[0]] # root
toc = data[3] # toc
while stack:
d = stack.pop()
if mtime(d) > last_build:
logger.info("building %s because directory %s changed",
self.outnm, d)
return True
for nm in os.listdir(d):
path = os.path.join(d, nm)
if os.path.isdir(path):
stack.append(path)
self.data = toc
return False
def assemble(self):
logger.info("building Tree %s", os.path.basename(self.out))
stack = [(self.root, self.prefix)]
excludes = {}
xexcludes = {}
for nm in self.excludes:
if nm[0] == '*':
xexcludes[nm[1:]] = 1
else:
excludes[nm] = 1
rslt = []
while stack:
dir, prefix = stack.pop()
for fnm in os.listdir(dir):
if excludes.get(fnm, 0) == 0:
ext = os.path.splitext(fnm)[1]
if xexcludes.get(ext, 0) == 0:
fullfnm = os.path.join(dir, fnm)
rfnm = prefix and os.path.join(prefix, fnm) or fnm
if os.path.isdir(fullfnm):
stack.append((fullfnm, rfnm))
else:
rslt.append((rfnm, fullfnm, 'DATA'))
self.data = rslt
try:
oldstuff = _load_data(self.out)
except:
oldstuff = None
newstuff = (self.root, self.prefix, self.excludes, self.data)
if oldstuff != newstuff:
_save_data(self.out, newstuff)
return 1
logger.info("%s no change!", self.out)
return 0
class MERGE(object):
"""
Merge repeated dependencies from other executables into the first
execuable. Data and binary files are then present only once and some
disk space is thus reduced.
"""
def __init__(self, *args):
"""
Repeated dependencies are then present only once in the first
executable in the 'args' list. Other executables depend on the
first one. Other executables have to extract necessary files
from the first executable.
args dependencies in a list of (Analysis, id, filename) tuples.
Replace id with the correct filename.
"""
# The first Analysis object with all dependencies.
# Any item from the first executable cannot be removed.
self._main = None
self._dependencies = {}
self._id_to_path = {}
for _, i, p in args:
self._id_to_path[i] = p
# Get the longest common path
self._common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for a, _, _ in args]))
if self._common_prefix[-1] != os.sep:
self._common_prefix += os.sep
logger.info("Common prefix: %s", self._common_prefix)
self._merge_dependencies(args)
def _merge_dependencies(self, args):
"""
Filter shared dependencies to be only in first executable.
"""
for analysis, _, _ in args:
path = os.path.abspath(analysis.scripts[-1][1]).replace(self._common_prefix, "", 1)
path = os.path.splitext(path)[0]
if path in self._id_to_path:
path = self._id_to_path[path]
self._set_dependencies(analysis, path)
def _set_dependencies(self, analysis, path):
"""
        Synchronize the Analysis result with the needed dependencies.
"""
for toc in (analysis.binaries, analysis.datas):
for i, tpl in enumerate(toc):
if not tpl[1] in self._dependencies.keys():
logger.debug("Adding dependency %s located in %s" % (tpl[1], path))
self._dependencies[tpl[1]] = path
else:
dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])
logger.debug("Referencing %s to be a dependecy for %s, located in %s" % (tpl[1], path, dep_path))
analysis.dependencies.append((":".join((dep_path, tpl[0])), tpl[1], "DEPENDENCY"))
toc[i] = (None, None, None)
# Clean the list
toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]
    # TODO move this function to the PyInstaller.compat module (and probably
    # improve compat.relpath()).
def _get_relative_path(self, startpath, topath):
start = startpath.split(os.sep)[:-1]
start = ['..'] * len(start)
if start:
start.append(topath)
return os.sep.join(start)
else:
return topath
def TkTree():
raise SystemExit('TkTree has been removed in PyInstaller 2.0. '
'Please update your spec-file. See '
'http://www.pyinstaller.org/wiki/MigrateTo2.0 for details')
def TkPKG():
raise SystemExit('TkPKG has been removed in PyInstaller 2.0. '
'Please update your spec-file. See '
'http://www.pyinstaller.org/wiki/MigrateTo2.0 for details')
def build(spec, buildpath):
global SPECPATH, BUILDPATH, WARNFILE, rthooks, SPEC, specnm
rthooks = _load_data(os.path.join(HOMEPATH, 'support', 'rthooks.dat'))
SPEC = spec
SPECPATH, specnm = os.path.split(spec)
specnm = os.path.splitext(specnm)[0]
if SPECPATH == '':
SPECPATH = os.getcwd()
BUILDPATH = os.path.join(SPECPATH, 'build',
"pyi." + sys.platform, specnm)
    # Check and adjust the build path.
if buildpath != DEFAULT_BUILDPATH:
bpath = buildpath
if os.path.isabs(bpath):
BUILDPATH = bpath
else:
BUILDPATH = os.path.join(SPECPATH, bpath)
WARNFILE = os.path.join(BUILDPATH, 'warn%s.txt' % specnm)
if not os.path.exists(BUILDPATH):
os.makedirs(BUILDPATH)
# Executing the specfile (it's a valid python file)
execfile(spec)
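# For orientation, a spec file executed by build() above typically chains the
# Target classes defined in this module roughly as follows (illustrative sketch
# only -- the script name and variable names are assumptions, and a real spec
# generated by PyInstaller may differ):
#
#   a = Analysis(['myscript.py'])
#   pyz = PYZ(a.pure)
#   exe = EXE(pyz, a.scripts, exclude_binaries=1, name='myscript', console=1)
#   coll = COLLECT(exe, a.binaries, a.zipfiles, a.datas, name='dist_myscript')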
def __add_options(parser):
parser.add_option('--buildpath', default=DEFAULT_BUILDPATH,
help='Buildpath (default: %default)')
parser.add_option('-y', '--noconfirm',
action="store_true", default=False,
help='Remove output directory (default: %s) without '
'confirmation' % os.path.join('SPECPATH', 'dist', 'SPECNAME'))
parser.add_option('--upx-dir', default=None,
help='Directory containing UPX (default: search in path)')
parser.add_option("-a", "--ascii", action="store_true",
help="do NOT include unicode encodings "
"(default: included if available)")
def main(specfile, buildpath, noconfirm, ascii=False, **kw):
global config
global icon, versioninfo, winresource, winmanifest, pyasm
global HIDDENIMPORTS, NOCONFIRM
NOCONFIRM = noconfirm
# Test unicode support.
if not ascii:
HIDDENIMPORTS.extend(misc.get_unicode_modules())
# FIXME: this should be a global import, but can't due to recursive imports
import PyInstaller.configure as configure
config = configure.get_config(kw.get('upx_dir'))
if config['hasRsrcUpdate']:
from PyInstaller.utils import icon, versioninfo, winresource
pyasm = bindepend.getAssemblies(sys.executable)
else:
pyasm = None
if config['hasUPX']:
setupUPXFlags()
if not config['useELFEXE']:
EXE.append_pkg = 0
build(specfile, buildpath)
| [
"[email protected]"
] | |
243b30d8a04317b70aab7c0bbadabf27a895a4a2 | 480a175ab2b3c012af2d1cddb79674fad1490fe5 | /0x08-python-more_classes/tests/main.2.py | 2cb60d1c599573c08cc695829729fe51c64ab27d | [] | no_license | ianliu-johnston/holbertonschool-higher_level_programming | a8a6476fc6a7ac0bd8ae300f2196f17c13e1b36f | f6a7c9cddb2482991c2aadacb99aa66e64eb50eb | refs/heads/master | 2021-04-29T11:12:56.820851 | 2017-05-10T00:48:17 | 2017-05-10T00:48:17 | 77,854,226 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | #!/usr/bin/python3
Rectangle = __import__('2-rectangle').Rectangle
new_rect = Rectangle(3, 4)
print("Dimensions of your new rectangle: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.width = 5
print("Width just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.height = 15
print("height just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
print("Making another one.")
next_rect = Rectangle()
print("Dimensions of your new rectangle: {} x {}".format(next_rect.width, next_rect.height))
print("Area: {}".format(next_rect.area()))
print("Perimeter: {}".format(next_rect.perimeter()))
| [
"[email protected]"
] | |
702e93ec385bbb5567fec0ac4ca70cf08f9f04db | 7dbcf66e47684c652f9d90a47b2381cf846e003d | /pkg/Conf.py | d8e12155528eb0090ab0006f88fcc253282e3ede | [] | no_license | hlanSmart/simple | 531b9a8be524d29c43016c865f64132aa4bf3069 | c8536edd4cec1f39e23a5ff35ae16f0efa15f323 | refs/heads/master | 2020-12-27T08:24:04.383170 | 2016-09-22T04:29:44 | 2016-09-22T04:29:44 | 68,556,669 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | #!/usr/bin/python
#coding:utf-8
import os,yaml
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def readServer(sg,sl=False): # sg: server group name; sl: if True, return the list of group names instead
with open(os.path.join(BASE_PATH,'etc/server.yml'),'r') as f:
server=yaml.load(f)
    if sl: # when sl is True, return the group names rather than the group info
li=[]
for i in server:
li.append(i)
return li
if sg in server:
        gp=server[sg] # gp: info for the requested server group
        for i in gp: # port 22 is omitted from the config file by default, so append it to the result manually
if len(gp[i])<3:
gp[i].append(22)
return gp
    return False # return False when the server group does not exist
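# Illustrative server.yml layout assumed by readServer() above. Only the group
# -> host -> list structure and the optional third element (the SSH port,
# defaulting to 22) are inferred from the code; the other field meanings are
# assumptions made for this example:
#
#   web:
#     host1: [192.168.1.10, secret]          # no port given -> 22 is appended
#     host2: [192.168.1.11, secret, 2222]
#   db:
#     host3: [192.168.1.12, secret]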
def readYaml(P):
try:
with open(P) as f:
return yaml.load(f)
except Exception as e:
print(e)
return False
| [
"root@localhost"
] | root@localhost |
c2bb14d7ae24c97ce9e538b563179a0fb27d3f71 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/77d922e63877a9db19d31d69878e680aa58a54c85eee51673bc8bfa5abec9462/cython_runtime.py | f0626603901297c264822be8e70b80c27bee933e | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\stats\statlib.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"[email protected]"
] | |
e4d3b1c290b0ee2787f51f3bb625a45c1c113234 | 6daa3815511b1eb1f4ff3a40b7e9332fab38b8ef | /tastesavant/taste/apps/profiles/migrations/0010_auto__add_field_profile_preferred_site__chg_field_profile_user.py | f631b68b525621e7885479041e53e8ea8b703f7e | [] | no_license | kaizensoze/archived-projects | 76db01309453606e6b7dd9d2ff926cfee42bcb05 | d39ac099cb40131bac5de66bde7d0e2db5f74189 | refs/heads/master | 2021-05-31T12:16:17.800730 | 2016-02-23T00:27:56 | 2016-02-23T00:27:56 | 14,407,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,513 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.preferred_site'
# The default value, 3, should refer to the NYC site.
db.add_column('profiles_profile', 'preferred_site',
self.gf('django.db.models.fields.related.ForeignKey')(default=3, to=orm['sites.Site']),
keep_default=False)
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
def backwards(self, orm):
# Deleting field 'Profile.preferred_site'
db.delete_column('profiles_profile', 'preferred_site_id')
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.friendship': {
'Meta': {'object_name': 'Friendship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_sent_to_user_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'blogger': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'digest_notifications': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'favorite_food': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'favorite_restaurant': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friends'", 'to': "orm['auth.User']", 'through': "orm['profiles.Friendship']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'last_sync_facebook': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_sync_foursquare': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notification_level': ('django.db.models.fields.CharField', [], {'default': "'instant'", 'max_length': '16'}),
'preferred_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'type_expert': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type_reviewer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['profiles']
| [
"[email protected]"
] | |
ad784210df07d410b4d9d0b3795e111aa61b9193 | b7453e5a2700f2017a6f783eaf3990ee2486cd65 | /test/utils/test_clean_identity.py | 54c6c0a2df4ef8f53c92989877f93ce940c57635 | [
"Apache-2.0"
] | permissive | LaRiffle/cleaning-scripts | 8525164cca8336b67a2362d6907414e27ca088fa | 08f360721056d30befe8d58ded583a4a5d126184 | refs/heads/master | 2020-07-28T06:52:47.673033 | 2019-11-19T15:26:19 | 2019-11-19T15:26:19 | 209,343,798 | 0 | 0 | Apache-2.0 | 2019-09-20T13:13:25 | 2019-09-18T15:33:16 | Python | UTF-8 | Python | false | false | 233 | py | from scripts import utils
def test_clean_identity():
assert utils.clean_identity(None) == ""
assert utils.clean_identity("NaN") == ""
row_input = "Holà chicanos"
assert utils.clean_identity(row_input) == row_input
| [
"[email protected]"
] | |
1b5cd48ff39ee1da8dbaf2f526d75d0746e5c1e6 | f1d9df04036fc43c9e5cc7998b83261f4daa94b8 | /management_commands/insert_base_data.py | cf87a7c11fd7db6f4e396e72c0e9d41bce402ce1 | [] | no_license | Eaterator/web | 019eb6547995be30b3468e5c44ecc52f05858fb4 | 9c598607f76ad770c66d85c47ffcec05f92f4d66 | refs/heads/master | 2021-01-09T20:30:13.417308 | 2017-04-25T02:44:35 | 2017-04-25T02:44:35 | 81,286,177 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | from application.auth.models import Role
from application.recipe.models import Source
from application.base_models import db
def insert_role_data():
roles = [
{
'name': 'regular',
'type_': 'consumer',
'is_admin': False
},
{
'name': 'corporate',
'type_': 'business',
'is_admin': False
},
{
'name': 'admin',
'type_': 'admin',
'is_admin': True
}
]
if len(Role.query.all()) > 0:
return
for role in roles:
new_role = Role(**role)
db.session.add(new_role)
db.session.commit()
def insert_source_data():
sources = [
{
'base_url': 'foodnetwork.com',
'name': 'Food Network'
},
{
'base_url': 'epicurious.com',
'name': 'Epicurious'
},
{
'base_url': 'therecipedepository.com',
'name': 'The Recipe Depository',
},
{
'base_url': 'allrecipes.com',
'name': 'All Recipes',
},
{
'base_url': 'bonappetit.com',
'name': 'Bon Appetit'
},
{
'base_url': 'food.com',
'name': 'Food'
},
{
'base_url': 'simplyrecipes.com',
'name': 'Simply Recipes'
},
{
'base_url': 'bbcgoodfood.com',
'name': 'BBC Good Food'
},
{
'base_url': 'williams-sonoma.com',
'name': 'Williams Sonoma'
},
{
'base_url': 'finedininglovers.com',
'name': 'Fine Dining Lovers'
},
{
'base_url': 'thekitchn.com',
'name': 'The Kitchn'
},
{
'base_url': 'chowhound.com',
'name': 'Chow'
},
{
'base_url': 'myrecipes.com',
'name': 'My Recipes'
},
{
'base_url': '',
'name': 'Other'
}
]
for source in sources:
exists = Source.query.filter(Source.name == source['name']).all()
if len(exists) <= 0:
new_source = Source(**source)
db.session.add(new_source)
db.session.commit()
| [
"[email protected]"
] | |
d74da5f980c51f8a87e1f3491b38cb906651ba91 | 995c52ad5a0a3039ad37a4d2f07b06dcbbcf3961 | /tantalus/migrations/0059_auto_20180810_1837.py | f4ba3f19bfd13e80fa47e558107374b522b8b533 | [] | no_license | nafabrar/tantalus | d02cce3923205191f00b30e80152a0be7c091d6a | d8552d40472c29bc617b45a1edaf87c6624b824d | refs/heads/master | 2022-12-24T15:53:52.034999 | 2020-10-07T22:26:35 | 2020-10-07T22:26:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-10 18:37
from __future__ import unicode_literals
from django.db import migrations
def populate_sequence_file_info(apps, schema_editor):
FileResource = apps.get_model('tantalus', 'FileResource')
SequenceFileInfo = apps.get_model('tantalus', 'SequenceFileInfo')
for file_resource in FileResource.objects.all():
sequence_file_info = SequenceFileInfo(
file_resource=file_resource,
owner=file_resource.owner,
read_end=file_resource.read_end,
genome_region=file_resource.genome_region,
index_sequence=file_resource.index_sequence,
)
sequence_file_info.save()
class Migration(migrations.Migration):
dependencies = [
('tantalus', '0058_historicalsequencefileinfo_sequencefileinfo'),
]
operations = [
migrations.RunPython(populate_sequence_file_info)
]
| [
"[email protected]"
] | |
9b9a14f2985d9dd1d7bc6ef666b5d40a2a9a5256 | a7e0784b697b6c57920e16e2f54ea0ed2225c0e0 | /data/clingen_raw_to_training.py | 47d0357cb8921e5915cdc80d02e9879fcf3e88c3 | [] | no_license | rumeysa77/ClinGenML | 17e1a3786b8711387a61707252307aab13e682c5 | c3bf6fbf7d0fe6c1311ce0fcfb4e26d8331bbc7d | refs/heads/master | 2023-03-22T04:41:40.669592 | 2021-02-24T09:04:29 | 2021-02-24T09:04:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,815 | py | """
This file processes the raw excel sheet and extract data
"""
import time
import csv
from collections import defaultdict
from Bio import Entrez
from pathlib import Path
import unicodedata
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
# clean text does not tokenize anything!
def clean_text(text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def reduce_whitespace(text):
return ' '.join(text.split())
major_5_panels = {'experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control'}
label_vocab = ['experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control']
class DatasetExtractor(object):
def __init__(self, path=None):
self.major_5_pmid_to_panel = defaultdict(set)
header = None
if path is not None:
with open(path, encoding='utf-8', errors='ignore') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
header = line[:-2]
elif line[4] != '': # ClinVar ID cannot be null
if line[1] in major_5_panels:
self.major_5_pmid_to_panel[line[2]].add(line[1])
def fetch_title_abstract_keywords(self, one_id):
ids = one_id
Entrez.email = '[email protected]'
handle = Entrez.efetch(db='pubmed',
retmode='xml',
id=ids)
results = Entrez.read(handle)
# retrieving for only 1 result
for i, paper in enumerate(results['PubmedArticle']):
abstract = []
if 'Abstract' in paper['MedlineCitation']['Article']:
for section in paper['MedlineCitation']['Article']['Abstract']['AbstractText']:
abstract.append(section)
else:
continue
abstract = " ".join(abstract)
title = paper['MedlineCitation']['Article']['ArticleTitle']
keywords = []
for elem in paper['MedlineCitation']['KeywordList']:
for e in elem:
keywords.append(e)
keywords = ' '.join(keywords)
return title, abstract, keywords
return None
def merge_text(self, title, abstract, keywords, entrez=False):
        # merge title, keywords and abstract into a single " || "-separated text field
text = ''
if not entrez:
text = title + " || " + " ".join(keywords.split('/')) + " || " + reduce_whitespace(clean_text(abstract))
else:
text = title + " || " + keywords + " || " + reduce_whitespace(clean_text(abstract))
return text
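    # Illustrative merge_text() output (values assumed):
    #   "Some title || keyword1 keyword2 || Cleaned abstract text ..."
    # With entrez=False the keywords string is split on '/' and re-joined with
    # spaces; with entrez=True it is used as-is.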
def generate_pmid_panel_set(self, log=False, tqdm=False, notebook=False):
# will call Entrez BioPython to grab abstracts
data = []
pmid_to_data = {}
start = time.time()
cnt = 0
for k, v in self.major_5_pmid_to_panel.items():
cnt += 1
res = self.fetch_title_abstract_keywords(k)
if res is None:
continue # 24940364 is not found...
text = self.merge_text(*res)
# label = ['0'] * len(label_vocab)
label = []
for v_i in v:
label.append(str(label_vocab.index(v_i)))
data.append('\t'.join([text, ' '.join(label)]))
pmid_to_data[k] = '\t'.join([text, ' '.join(label)])
if log:
if cnt % 100 == 0:
print(cnt, time.time() - start, 'secs')
return data, pmid_to_data
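    # Each element of the returned `data` list is one tab-separated training row
    # (example values assumed):
    #   "Title || keywords || abstract\t0 2"
    # where "0 2" are space-separated indices into label_vocab
    # ('experimental-studies' and 'segregation-data' in this example).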
def write_data_to_csv(self, data, csv_file_path):
# expect `data` directly from `generate_pmid_panel_set`
with open(csv_file_path, encoding='utf-8', errors='ignore', mode='w') as f:
for line in data:
f.write(line + '\n')
def write_pmid_to_list(self, path):
# it will directly save as "pmids.txt", which is what PubMunch expects
# call this function to generate a list of pmid
# so you can use PubMunch to download
p = Path(path)
p.mkdir(exist_ok=True)
with open('{}/pmids.txt'.format(path), 'w') as f:
for pmid in self.major_5_pmid_to_panel.keys():
f.write(pmid + '\n')
def __sub__(self, other):
assert type(other) == type(self)
new_pmids = set(list(self.major_5_pmid_to_panel.keys())) - set(list(other.major_5_pmid_to_panel))
de = DatasetExtractor()
for pmid in new_pmids:
panel = self.major_5_pmid_to_panel[pmid]
de.major_5_pmid_to_panel[pmid] = panel
return de
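    # Illustrative use of the subtraction defined above (names assumed): given
    # extractors built from an old and a new spreadsheet,
    #   new_only = de_new - de_old
    # yields an extractor containing only the PMIDs absent from the old one.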
if __name__ == '__main__':
# testing
de = DatasetExtractor("../corpus/ML Data (as of 3_17_19).csv")
print(de.merge_text(*de.fetch_title_abstract_keywords("10206684")))
| [
"[email protected]"
] | |
0fca165af2a23670c0fdd4db934637cc1abf3c10 | 77531ad16a3ddf7aa92b7b4de809cce2a96c88a5 | /sitetables/toolbox/sources.py | 53a8ff4e69c31bffc800f47c48937200b5f4ad69 | [] | no_license | idlesign/django-sitetables | 6d3ed6b534e51c67704528d6fa1be0bc6f9f64f4 | 008b748919ee330da168d4766cd6b3c3c27e45b8 | refs/heads/master | 2022-02-17T21:25:26.430653 | 2022-02-04T12:46:19 | 2022-02-04T12:46:19 | 164,444,235 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,028 | py | import re
from collections import namedtuple
from itertools import chain
from typing import Optional, List, Union, Dict, Type, Tuple
from django.db.models import QuerySet, Model
from django.http import HttpRequest, JsonResponse
from django.urls import reverse
from .columns import TableColumn
if False: # pragma: nocover
from .tables import Table
TypeTableSource = Union[Dict, List[Dict], Type[Model], QuerySet]
TypeTableColumns = Dict[str, TableColumn]
TypeFilteredItems = Union[QuerySet, List]
TypeServerItems = Tuple[int, int, TypeFilteredItems]
TypePreparedItems = List[Dict[str, str]]
TableItemsFilter = namedtuple('TableItemsFilter', [
'start',
'length',
'search',
'order',
])
class TableSource:
"""Base data source for tables."""
columns: TypeTableColumns
_columns_by_idx: Dict[int, TableColumn]
_url_responder = None
_RE_COLUMN_DEF = re.compile(r'\[(\d+)\]\[([a-z]+)\]')
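    # The regex above pulls the column index and property name out of
    # DataTables-style parameter keys, e.g. (assumed inputs)
    #   'order[0][dir]'    -> groups ('0', 'dir')
    #   'order[2][column]' -> groups ('2', 'column')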
def __init__(self, source, options: Optional[dict] = None):
self.columns = {}
self._columns_by_idx = {}
self.row_id = 'DT_RowId'
self.options = options or {}
self._rows = []
self._bootstrap(source)
@classmethod
def spawn(cls, source, params: dict) -> 'TableSource':
"""Alternative constructor.
:param source:
:param params:
"""
return cls(source, options=params.get('options'))
def _server_get_filter(self, source: dict) -> TableItemsFilter:
"""Returns a filter object composed from source dictionary
(e.g. POST params).
:param source:
"""
by_idx = self._columns_by_idx
re_def = self._RE_COLUMN_DEF
order = []
length_default = 10
length = int(source.get('length', length_default))
if length > 5000:
length = length_default
start = int(source.get('start', 0))
items_filter = TableItemsFilter(
start=start,
length=length,
search=source.get('search[value]', '').strip() or '',
order=order,
)
source = dict(sorted(source.items(), key=lambda item: item[0]))
for key, val in source.items():
if key.startswith('order'):
match = re_def.search(key)
if not match:
continue
if match.group(2) == 'dir':
continue
column_idx = int(val)
column_name = by_idx.get(column_idx)
if not column_name:
continue
order_desc = source.get(f'order[{match.group(1)}][dir]', 'asc') == 'desc'
order.append(f"{'-' if order_desc else ''}{column_name}")
return items_filter
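    # Example mapping (request values assumed): POST data such as
    #   {'draw': '3', 'start': '10', 'length': '25', 'search[value]': 'foo',
    #    'order[0][column]': '2', 'order[0][dir]': 'desc'}
    # yields TableItemsFilter(start=10, length=25, search='foo', order=['-name']),
    # assuming the column registered at index 2 is named 'name'.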
def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
"""Must return serverside items filtered using th given filter.
:param items_filter:
"""
raise NotImplementedError # pragma: nocover
def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
"""Prepares items for on_server response.
:param items:
"""
return items
def respond(self, request: HttpRequest) -> JsonResponse:
"""
https://datatables.net/manual/server-side
:param request:
"""
source = request.POST
items_filter = self._server_get_filter(source.dict())
count_total, count_filtered, filtered = self._server_get_items(items_filter)
start = items_filter.start
filtered = filtered[start:start+items_filter.length]
filtered = self._server_prepare_items(filtered)
draw = source.get('draw', 1)
draw = int(draw) # As per docs.
out = {
'data': filtered,
'draw': draw,
'recordsTotal': count_total,
'recordsFiltered': count_filtered,
}
return JsonResponse(out)
def _get_columns(self) -> TypeTableColumns:
"""Should return columns dictionary."""
columns = {}
for name, title in self.options.get('columns_add', {}).items():
columns[name] = TableColumn(name=name, title=title)
return columns
def _bootstrap(self, source: TypeTableSource):
"""The place for a source-specific bootstrap."""
columns = self._get_columns()
self.columns = columns
self._columns_by_idx = {idx: column for idx, column in enumerate(columns)}
def contribute_to_config(self, config: dict, table: 'Table'):
"""Updates table configuration dictionary with source-specific params.
:param config:
:param table:
"""
config.update({
'createdRow': lambda: (
"function(row, data, idx){var v=data['%s']; if (v){$(row).attr('data-id', v);}}" % self.row_id),
'processing': True,
'columns': [column.as_dict() for column in self.columns.values()],
})
options = self.options
if options.get('on_server', False):
url_responder = self._url_responder
if url_responder is None:
url_responder = self.__class__._url_responder = reverse('sitetables:respond')
config.update({
'serverSide': True,
'ajax': {
'url': url_responder,
'type': 'POST',
'data': {
'tableName': table.name,
}
},
})
else:
if not options.get('init_dom'):
# todo maybe use serialization instead of string casting
# todo FK support
config['data'] = [{k: f'{v}' for k, v in row.items()} for row in self.rows]
@property
def rows(self) -> List[dict]:
"""Represents table rows."""
return self._rows
class ListDictsSource(TableSource):
"""Static data source.
.. code-block:: python
source = [
{
'one': '1',
'two': '2',
},
{
'one': '3',
'two': '4',
},
]
"""
def _bootstrap(self, source: TypeTableSource):
names = list(source[0].keys())
self.options['columns_add'] = dict.fromkeys(names, '')
self._rows = source
self.row_id = names[0] # Use first column value.
super()._bootstrap(source)
class ModelSource(TableSource):
"""Django model datasource.
.. code-block:: python
source = Article # Model class.
source = Article.objects.filter(hidden=False) # Or a QuerySet.
"""
model: Type[Model] = None
def _get_columns(self) -> TypeTableColumns:
columns = {}
meta = self.model._meta
for field in chain(meta.concrete_fields, meta.private_fields, meta.many_to_many):
name = field.name
columns[name] = TableColumn(name=name, title=field.verbose_name, source=field)
columns.update(super()._get_columns())
return columns
def _bootstrap(self, source: TypeTableSource):
if isinstance(source, QuerySet):
model = source.model
qs = source
else:
# Model class
model = source
qs = model.objects.all()
self.model = model
self.qs = qs
self.row_id = model._meta.pk.name
super()._bootstrap(source)
def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
qs = self.qs
filter_kwargs = {}
search = items_filter.search
if search:
filter_kwargs['title__contains'] = search
objects = qs.filter(**filter_kwargs)
count_total = qs.count()
count_filtered = objects.count()
order = items_filter.order
if order:
objects = objects.order_by(*order)
return count_total, count_filtered, objects
def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
dicts = []
columns = self.columns
for model in items:
item_data = {}
for column_name, column in columns.items():
if column.source is None:
# Model property.
item_data[column_name] = getattr(model, column_name)
else:
# Model field.
item_data[column_name] = column.source.value_from_object(model)
dicts.append(item_data)
return dicts
@property
def rows(self) -> List[dict]:
columns = self.columns
_, _, qs = self._server_get_items(TableItemsFilter(
start=0,
length=0,
search='',
order=[],
))
result = qs.values(*columns.keys())
return result
| [
"[email protected]"
] | |
ab0d95439f8363b720d81aa80ae3aa74a0432e28 | 104005986bccea0a4213cbd55d833c95baf2f4fa | /drivers/phot_drivers/LCOGT_template_single_request.py | c6603728c1e635419c96b9c4a2e6edda588ecfe7 | [] | no_license | lgbouma/cdips_followup | 8a92ec9a31b405d316c668a6d42ce10ad47f0501 | 99ac6c6c709f96a58083a5ff7c4cf2d4f0b554a8 | refs/heads/master | 2023-08-14T02:33:17.841926 | 2023-08-01T00:46:19 | 2023-08-01T00:46:19 | 206,371,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,229 | py | """
Given a source_id, make LCOGT photometry followup requests, and optionally
submit them to the LCOGT API.
"""
import numpy as np
from astropy.time import Time
from cdips_followup.manage_ephemerides import (
query_ephemeris, get_ephemeris_uncertainty
)
from cdips_followup.LCOGT_dedicated_requests import (
get_dedicated_request,
given_dedicated_requests_validate_submit
)
from astrobase.services.identifiers import tic_to_gaiadr2
TRANSITTYPEDICT = {
'all': ['OIBEO', 'IBEO', 'OIBE', 'OIB', 'BEO'],
'partials': ['OIB', 'BEO'],
'totals': ['OIBEO', 'IBEO', 'OIBE'],
'fulltotals': ['OIBEO']
}
def main():
##########################################
# CHANGE BELOW
savstr = '20230419_tic402980664_23B' # eg, 20191207_TOI1098_request_2m_tc_secondary. "ephemupdate" if it is one. (this cancels pending observations)
overwrite = 1
validate = 0
submit = 0
tic_id = '402980664' # '120105470'
source_id = None # '6113920619134019456' # can use instead of TIC
filtermode = 'ip'# 'zs', 'gp', 'ip'
#telescope_class = '1m0' # '1m0', '2m0', 'special'
telescope_class = 'special' # '1m0', '2m0', 'special'
ipp_value = 1 # usually 1
#max_search_time = Time('2022-12-31 23:59:00')
max_search_time = Time('2024-01-31 23:59:00')
verify_ephemeris_uncertainty = 1 # require t_tra uncertainty < 2 hours
inflate_duration = 0 # if t_tra uncertainty > 1 hour, inflate tdur by +/- 45 minutes per side
transit_type = 'totals' # see above
max_n_events = 99 # else None. n_events is per eventclass.
raise_error = False # raise an error if max_duration_error flag raised.
    max_duration_error = 30 # the submitted LCOGT request must match the requested duration to within this difference [minutes]
sites = ['Palomar'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Keck Observatory'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Cerro Paranal'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
force_acceptability = 50 # None or int.
# CHANGE ABOVE
##########################################
max_airmass_sched = 2.5
manual_ephemeris = False
manual_ephemeris = True # FIXME
create_eventclasses = TRANSITTYPEDICT[transit_type]
submit_eventclasses = TRANSITTYPEDICT[transit_type]
if source_id is None:
assert isinstance(tic_id, str)
source_id = tic_to_gaiadr2(tic_id)
if manual_ephemeris:
period = 18.559/24
period_unc = 0.001/24
epoch = 2457000 + 1791.2972827806442
epoch_unc = 1e-5
duration = 1.04
else:
# get ephemeris from ephemerides.csv
d = query_ephemeris(source_id=source_id)
period, epoch, duration = (
d['period'], d['epoch'], d['duration']
)
period_unc, epoch_unc, duration_unc = (
d['period_unc'], d['epoch_unc'], d['duration_unc']
)
if verify_ephemeris_uncertainty:
delta_t_tra_today = (
get_ephemeris_uncertainty(epoch, epoch_unc, period, period_unc, epoch_obs='today')
)
if delta_t_tra_today*24 < 0:
msg = f'ERR! Got negative ephem unc of {delta_t_tra_today*24:.1f} hr. Need to give a believable ephem unc..'
raise ValueError(msg)
if delta_t_tra_today*24 > 2:
msg = f'ERR! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is too high.'
raise ValueError(msg)
if delta_t_tra_today*24 > 1:
msg = f'WRN! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is risky.'
print(msg)
else:
msg = f'INFO! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is fine.'
print(msg)
if inflate_duration:
assert verify_ephemeris_uncertainty
if delta_t_tra_today*24 > 1:
            msg = f'... inflating transit duration for scheduling purposes by 1.5 hours.'
print(msg)
            duration += 1.5 # add 1.5 hr of scheduling margin
# "requests" is a list of lists. Higher level is each eventclass. Level
# below is each event, in that eventclass.
requests = get_dedicated_request(
savstr, source_id, period, epoch, duration, create_eventclasses,
overwrite=overwrite, max_search_time=max_search_time,
filtermode=filtermode, telescope_class=telescope_class,
ipp_value=ipp_value, sites=sites,
force_acceptability=force_acceptability,
max_airmass_sched=max_airmass_sched
)
# if a maximum number of events is set, impose it!
if isinstance(max_n_events, int):
_requests = []
for ix in range(len(create_eventclasses)):
print('starting with {} {} events.'.
format(len(requests[ix]), create_eventclasses[ix])
)
for eventclass in requests:
_eventclass = []
starttimes = []
for req in eventclass:
starttimes.append(req['requests'][0]['windows'][0]['start'])
# sort by start time, cut to get the closest ones.
sort_times = np.sort(starttimes)
sel_times = sort_times[ : max_n_events]
for req in eventclass:
starttime = req['requests'][0]['windows'][0]['start']
if starttime in sel_times:
_eventclass.append(req)
if len(_eventclass) > 0:
_requests.append(_eventclass)
if len(_requests) == 0:
print('WRN!: got no times')
return
assert len(_requests[0]) <= max_n_events
requests = _requests
print('WRN!: trimmed to {} events.'.format(len(requests[0])))
if len(sel_times)>0:
print('WRN!: max time: \n{}'.format(repr(sel_times[-1])))
print('\nWRN!: selected times: \n{}'.format(repr(sel_times)))
else:
print('WRN!: got no times')
given_dedicated_requests_validate_submit(
requests, submit_eventclasses, validate=validate, submit=submit,
max_duration_error=max_duration_error, raise_error=raise_error
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
10c75430230872f750e9ed2c0a241436c9120a7f | b509ef07d752e987f4cb84d1abd4c3a98488a6c7 | /resources/lib/streamlink/plugins/nownews.py | 02bd76def1234a8b05929f26bb670853a147f7ba | [
"BSD-2-Clause"
] | permissive | Twilight0/script.module.streamlink.base | d91245d1a43d6b3191b62a6eb4b1cf70598ed23e | c1e4628715a81806586b10323b8cb01424bbb6fc | refs/heads/master | 2021-01-21T04:32:41.658823 | 2020-09-07T20:56:29 | 2020-09-07T20:56:29 | 101,915,967 | 6 | 4 | BSD-2-Clause | 2018-01-14T15:20:47 | 2017-08-30T18:31:47 | Python | UTF-8 | Python | false | false | 2,149 | py | import logging
import re
import json
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class NowNews(Plugin):
_url_re = re.compile(r"https?://news.now.com/home/live")
epg_re = re.compile(r'''epg.getEPG\("(\d+)"\);''')
api_url = "https://hkt-mobile-api.nowtv.now.com/09/1/getLiveURL"
backup_332_api = "https://d7lz7jwg8uwgn.cloudfront.net/apps_resource/news/live.json"
backup_332_stream = "https://d3i3yn6xwv1jpw.cloudfront.net/live/now332/playlist.m3u8"
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
m = self.epg_re.search(res.text)
channel_id = m and m.group(1)
if channel_id:
log.debug("Channel ID: {0}".format(channel_id))
if channel_id == "332":
# there is a special backup stream for channel 332
bk_res = self.session.http.get(self.backup_332_api)
bk_data = self.session.http.json(bk_res)
if bk_data and bk_data["backup"]:
log.info("Using backup stream for channel 332")
return HLSStream.parse_variant_playlist(self.session, self.backup_332_stream)
api_res = self.session.http.post(self.api_url,
headers={"Content-Type": 'application/json'},
data=json.dumps(dict(channelno=channel_id,
mode="prod",
audioCode="",
format="HLS",
callerReferenceNo="20140702122500")))
data = self.session.http.json(api_res)
for stream_url in data.get("asset", {}).get("hls", {}).get("adaptive", []):
return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = NowNews
| [
"[email protected]"
] | |
a5a17178600de20cbfc8a242569037482fae9caf | fccb5a43179906ddc3dd37849ac2a89cacf44981 | /sphinx/source/exercises/solution/03_os_sub_req/ex5.py | 653a604a993839e3b042cfc9ccaf6cd8eba8ff1f | [] | no_license | YasmineOweda/spring2021 | a48c1c4eaa525053a0e2188cf088124b004a35d8 | 072aadba20bfbc659427265fa228518fe4b09ff3 | refs/heads/master | 2023-04-29T10:20:14.132211 | 2021-05-11T09:07:40 | 2021-05-11T09:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | import os
#1
os.mkdir('os_exercises')
#2
os.chdir('os_exercises')
open('exercise.py', 'w').close()
#3
x = input('Please write something to the file: ')
with open('exercise.py', 'w') as f:
f.write(x)
#4
x = input('Please write something more to another file: ')
with open('exercise2.py', 'w') as f:
f.write(x)
#5
with open('exercise.py', 'r') as f1:
with open('exercise2.py', 'r' ) as f2:
print(f1.read() + f2.read())
| [
"[email protected]"
] | |
db3b4d13adbd04eba6106f6e0d8559771deadcd5 | 61699048dc567cd3a814e5b987599dae175bed19 | /Python/month01/day15/exercise02.py | ba4af22e18080c30f44bdc184166efdfe0b8e96a | [] | no_license | Courage-GL/FileCode | 1d4769556a0fe0b9ed0bd02485bb4b5a89c9830b | 2d0caf3a422472604f073325c5c716ddd5945845 | refs/heads/main | 2022-12-31T17:20:59.245753 | 2020-10-27T01:42:50 | 2020-10-27T01:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | """
Exercise 2: Define a function that, given a birthday (year, month, day), calculates how many days have been lived.
Input: 2010 1 1
Output: From 2010-01-01 until now, a total of 3910 days have been lived.
"""
import time
def life_days(year, month, day):
# current time - birth time
# time_tuple = time.strptime("%d-%d-%d" % (year, month, day), "%Y-%m-%d")
time_tuple = (year, month, day, 0, 0, 0, 0, 0, 0)
life_second = time.time() - \
time.mktime(time_tuple)
return life_second / 60 / 60 / 24
y = 1990
m = 9
d = 18
result = life_days(y, m, d)
print(f"From {y}-{m}-{d} until now, a total of {result:.0f} days have been lived")
| [
"[email protected]"
] | |
ebce17fb0dd02ef5af320607dbcfad78bb6aec8c | dcd0fb6bdcb488dd2046778eb02edce8f4623b58 | /object_follow_edgetpu/detect_standalone.py | 7e196dbb4d1727616b1a5ec9f56384351df24223 | [] | no_license | openbsod/Adeept_AWR | 12f2df24bfcf85d7965a425bb0078b2c858e807a | 92ca5e7147a9cb44ad55f55a467371648dc76b3c | refs/heads/master | 2023-04-09T07:06:35.772918 | 2021-04-15T21:20:40 | 2021-04-15T21:20:40 | 284,012,618 | 1 | 0 | null | 2020-07-31T10:46:50 | 2020-07-31T10:46:49 | null | UTF-8 | Python | false | false | 4,801 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object detection demo.
This demo script requires a Raspberry Pi Camera and a pre-compiled model.
Get the pre-compiled model from the Coral website [1]
[1]: https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
"""
from edgetpu.detection.engine import DetectionEngine
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import numpy as np
import time
import io
import picamera
# https://github.com/waveform80/picamera/issues/383
def _monkey_patch_picamera():
original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer
def silent_send_buffer(zelf, *args, **kwargs):
try:
original_send_buffer(zelf, *args, **kwargs)
except picamera.exc.PiCameraMMALError as error:
if error.status != 14:
raise error
picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer
# Read labels.txt file provided by Coral website
def _read_label_file(file_path):
with open(file_path, 'r', encoding="utf-8") as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
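# Illustrative layout of the label file assumed by the parser above (rows shown
# here are examples only; use the actual coco_labels.txt shipped by Coral):
#   0  person
#   1  bicycle
#   2  car
# i.e. one "<numeric id><whitespace><label>" entry per line.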
# Main loop
def main():
model_filename = "mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
label_filename = "coco_labels.txt"
engine = DetectionEngine(model_filename)
labels = _read_label_file(label_filename)
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
fnt = ImageFont.load_default()
# To view preview on VNC,
# https://raspberrypi.stackexchange.com/a/74390
with picamera.PiCamera() as camera:
_monkey_patch_picamera()
camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
camera.framerate = 15
camera.rotation = 180
_, width, height, channels = engine.get_input_tensor_shape()
print("{}, {}".format(width, height))
overlay_renderer = None
camera.start_preview()
try:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream,
format='rgb',
use_video_port=True):
# Make Image object from camera stream
stream.truncate()
stream.seek(0)
input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
input = input.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
image = Image.fromarray(input)
# image.save("out.jpg")
# Make overlay image plane
img = Image.new('RGBA',
(CAMERA_WIDTH, CAMERA_HEIGHT),
(255, 0, 0, 0))
draw = ImageDraw.Draw(img)
# Run detection
start_ms = time.time()
results = engine.DetectWithImage(image,
threshold=0.2, top_k=10)
elapsed_ms = (time.time() - start_ms)*1000.0
if results:
for obj in results:
box = obj.bounding_box.flatten().tolist()
box[0] *= CAMERA_WIDTH
box[1] *= CAMERA_HEIGHT
box[2] *= CAMERA_WIDTH
box[3] *= CAMERA_HEIGHT
# print(box)
# print(labels[obj.label_id])
draw.rectangle(box, outline='red')
draw.text((box[0], box[1]-10), labels[obj.label_id],
font=fnt, fill="red")
camera.annotate_text = "{0:.2f}ms".format(elapsed_ms)
if not overlay_renderer:
overlay_renderer = camera.add_overlay(
img.tobytes(),
size=(CAMERA_WIDTH, CAMERA_HEIGHT), layer=4, alpha=255)
else:
overlay_renderer.update(img.tobytes())
finally:
if overlay_renderer:
camera.remove_overlay(overlay_renderer)
camera.stop_preview()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a76bbe862fc2f943b5866b00388228264612f33d | 6d4af63e07a137d382ef61afe8276f7470b7af59 | /wsgistate/__init__.py | 742cd2a8b2a8e916a3427188ed7f1c260ff1b2b1 | [] | no_license | Cromlech/wsgistate | 142c7016c74fc28e6c56368f018bf113c379118c | d730ee47a4a43efbd20bcb9623e76bedeeb8c62b | refs/heads/master | 2023-04-11T14:10:20.522520 | 2023-04-11T10:06:10 | 2023-04-11T10:06:10 | 15,806,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,085 | py | # Copyright (c) 2005 Allan Saddi <[email protected]>
# Copyright (c) 2005, the Lawrence Journal-World
# Copyright (c) 2006 L. C. Rees
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Django nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
'''Base Cache class'''
__all__ = ['BaseCache', 'db', 'file', 'memory', 'memcached',
'session', 'simple', 'cache']
def synchronized(func):
'''Decorator to lock and unlock a method (Phillip J. Eby).
@param func Method to decorate
'''
def wrapper(self, *__args, **__kw):
self._lock.acquire()
try:
return func(self, *__args, **__kw)
finally:
self._lock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
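# Usage sketch (illustrative only, not part of the original module): the wrapper
# expects the instance to provide a lock as self._lock, e.g.
#
#   import threading
#
#   class Counter(object):
#       def __init__(self):
#           self._lock = threading.Lock()
#           self._count = 0
#
#       @synchronized
#       def increment(self):
#           self._count += 1
#
# Each increment() call then acquires and releases self._lock around the body.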
class BaseCache(object):
'''Base Cache class.'''
def __init__(self, *a, **kw):
super(BaseCache, self).__init__()
timeout = kw.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.timeout = timeout
def __getitem__(self, key):
'''Fetch a given key from the cache.'''
return self.get(key)
def __setitem__(self, key, value):
'''Set a value in the cache. '''
self.set(key, value)
def __delitem__(self, key):
'''Delete a key from the cache.'''
self.delete(key)
def __contains__(self, key):
'''Tell if a given key is in the cache.'''
return self.get(key) is not None
def get(self, key, default=None):
'''Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
@param key Keyword of item in cache.
@param default Default value (default: None)
'''
raise NotImplementedError()
def set(self, key, value):
'''Set a value in the cache.
@param key Keyword of item in cache.
@param value Value to be inserted in cache.
'''
raise NotImplementedError()
def delete(self, key):
'''Delete a key from the cache, failing silently.
@param key Keyword of item in cache.
'''
raise NotImplementedError()
def get_many(self, keys):
'''Fetch a bunch of keys from the cache. Returns a dict mapping each
key in keys to its value. If the given key is missing, it will be
missing from the response dict.
@param keys Keywords of items in cache.
'''
d = dict()
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
| [
"[email protected]"
] | |
a658a0212b71fb6327314f0662b6143017559bc1 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/snailmail/models/mail_notification.py | a368c0a778338b68f037181c93c3d78bffc3f691 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
from odoo import fields, models
class Notification(models.Model):
_inherit = 'mail.notification'
notification_type = fields.Selection(selection_add=[('snail', 'Snailmail')], ondelete={'snail': 'cascade'})
letter_id = fields.Many2one('snailmail.letter', string="Snailmail Letter", index=True, ondelete='cascade')
failure_type = fields.Selection(selection_add=[
('sn_credit', "Snailmail Credit Error"),
('sn_trial', "Snailmail Trial Error"),
('sn_price', "Snailmail No Price Available"),
('sn_fields', "Snailmail Missing Required Fields"),
('sn_format', "Snailmail Format Error"),
('sn_error', "Snailmail Unknown Error"),
])
| [
"[email protected]"
] | |
de8b449316abbe86696e3641635d94af6d290c5d | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/caffe2/python/operator_test/stats_put_ops_test.py | 2ce56248c5dd0116931f91de9b4b556dd881e73b | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:86a74bb87f96bd8ebf2fa9ae72729c5cbe121a32edc1fb034496e084703631b3
size 6596
| [
"[email protected]"
] | |
a35e6a756f615aca80c4b91a8b264a5aa0cd6d0e | 9cd00edd008ce38ea3127f090b6867a91fe7193d | /src/plot_Qle_at_all_events_above_Tthreh.py | 382993ac07bd63823ff8cd12124f714a8056199b | [] | no_license | shaoxiuma/heatwave_coupling | c5a2a2bba53351597f4cb60ecb446bfb9629812f | 459f6bc72402b5dd3edf49bc3b9be380b5f54705 | refs/heads/master | 2021-09-13T06:50:48.733659 | 2018-04-26T06:09:54 | 2018-04-26T06:09:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/env python
"""
For each of the OzFlux/FLUXNET2015 sites, plot Qle and the Bowen ratio for
TXx and the T-4 days.
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (20.04.2018)"
__email__ = "[email protected]"
import os
import sys
import glob
import netCDF4 as nc
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
import re
import constants as c
def main(fname):
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
df = pd.read_csv(fname)
df = df[df.pft == "EBF"]
df = df[~np.isnan(df.temp)]
#width = 12.0
#height = width / 1.618
#print(width, height)
#sys.exit()
width = 14
height = 10
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.05)
fig.subplots_adjust(wspace=0.05)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
count = 0
sites = np.unique(df.site)
for site in sites:
site_name = re.sub(r"(\w)([A-Z])", r"\1 \2", site)
ax = fig.add_subplot(3,3,1+count)
df_site = df[df.site == site]
events = int(len(df_site)/4)
cnt = 0
for e in range(0, events):
from scipy import stats
x = df_site["temp"][cnt:cnt+4]
y = df_site["Qle"][cnt:cnt+4]
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
if slope > 0.0 and p_value <= 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", zorder=100)
elif slope > 0.0 and p_value > 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", color="lightgrey",
zorder=1)
cnt += 4
if count == 0:
ax.set_ylabel("Qle (W m$^{-2}$)", position=(0.5, 0.0))
if count == 4:
#ax.set_xlabel('Temperature ($^\circ$C)', position=(1.0, 0.5))
ax.set_xlabel('Temperature ($^\circ$C)')
if count < 3:
plt.setp(ax.get_xticklabels(), visible=False)
if count != 0 and count != 3:
plt.setp(ax.get_yticklabels(), visible=False)
props = dict(boxstyle='round', facecolor='white', alpha=1.0,
ec="white")
ax.text(0.04, 0.95, site_name,
transform=ax.transAxes, fontsize=14, verticalalignment='top',
bbox=props)
from matplotlib.ticker import MaxNLocator
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.set_ylim(0, 280)
ax.set_xlim(15, 50)
count += 1
ofdir = "/Users/mdekauwe/Dropbox/fluxnet_heatwaves_paper/figures/figs"
fig.savefig(os.path.join(ofdir, "all_events.pdf"),
bbox_inches='tight', pad_inches=0.1)
#plt.show()
if __name__ == "__main__":
data_dir = "outputs/"
fname = "ozflux_all_events.csv"
fname = os.path.join(data_dir, fname)
main(fname)
| [
"[email protected]"
] | |
298bdb7986c7ce282903098e71efc3e61ebde167 | 4b0c57dddf8bd98c021e0967b5d94563d15372e1 | /run_MatrixElement/test/emptyPSets/emptyPSet_qqH125_cfg.py | 1925d9eb5134f84222300788d85f42237860a66f | [] | no_license | aperloff/TAMUWW | fea6ed0066f3f2cef4d44c525ee843c6234460ba | c18e4b7822076bf74ee919509a6bd1f3cf780e11 | refs/heads/master | 2021-01-21T14:12:34.813887 | 2018-07-23T04:59:40 | 2018-07-23T04:59:40 | 10,922,954 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import FWCore.ParameterSet.Config as cms
import os
#!
#! PROCESS
#!
process = cms.Process("MatrixElementProcess")
#!
#! SERVICES
#!
#process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageLogger.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.load('CommonTools.UtilAlgos.TFileService_cfi')
process.TFileService.fileName=cms.string('qqH125.root')
#!
#! INPUT
#!
inputFiles = cms.untracked.vstring(
'root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/qqH125.root'
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.source = cms.Source("PoolSource",
skipEvents = cms.untracked.uint32(0),
fileNames = inputFiles )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
| [
"[email protected]"
] | |
afbde151e2e1473b1d6aa573579299dc0eb3ce8d | 18c03a43ce50ee0129f9f45ada1bdaa2ff4f5774 | /epistasis/__init__.py | 4f9536d756aca5c653b3e69bbff59937aa2ff678 | [
"Unlicense"
] | permissive | harmsm/epistasis | acf7b5678b328527b2c0063f81d512fcbcd78ce1 | f098700c15dbd93977d797a1a1708b4cfb6037b3 | refs/heads/master | 2022-04-30T13:09:49.106984 | 2022-03-19T05:29:37 | 2022-03-19T05:29:37 | 150,969,948 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | """\
A Python API for modeling statistical, high-order epistasis in genotype-phenotype maps.
This library provides methods for:
1. Decomposing genotype-phenotype maps into high-order epistatic interactions
2. Finding nonlinear scales in the genotype-phenotype map
3. Calculating the contributions of different epistatic orders
4. Estimating the uncertainty of epistatic coefficients amd
5. Interpreting the evolutionary importance of high-order interactions.
For more information about the epistasis models in this library, see our Genetics paper:
`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`_
.. _`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`: http://www.genetics.org/content/205/3/1079
Currently, this package works only as an API and there is no command-line
interface. Instead, we encourage you to use this package inside `Jupyter notebooks`_.
"""
from .__version__ import __version__
| [
"[email protected]"
] | |
d8e42f2ce2432b336adb63018b3a51e93aacef6d | 1c0542cef2ac6a5fb691602887236bf70f9bf71f | /speed_test_sar/sfsi_speed/mmcls/models/backbones/utils/gumbel_sigmoid.py | 6610270f02c80a91e8e61cd013f8b7dff68c6ba3 | [
"Apache-2.0"
] | permissive | yizenghan/sarNet | 683f45620013f906cb8a550713e786787074a8ae | d47a6e243677811b259a753233fbbaf86d2c9c97 | refs/heads/master | 2023-07-16T02:09:11.913765 | 2021-08-30T02:04:02 | 2021-08-30T02:04:02 | 299,276,627 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import torch
from torch import nn
class GumbelSigmoid(nn.Module):
def __init__(self, max_T, decay_alpha, decay_method='exp', start_iter=0):
super(GumbelSigmoid, self).__init__()
self.max_T = max_T
self.cur_T = max_T
self.step = 0
self.decay_alpha = decay_alpha
self.decay_method = decay_method
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-8
# self.cur_T = (self.decay_alpha ** start_iter) * self.cur_T
assert self.decay_method in ['exp', 'step', 'cosine']
def forward(self, x):
# Shape <x> : [N, C, H, W]
# Shape <r> : [N, C, H, W]
r = 1 - x
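# What follows is a binary Gumbel-softmax ("concrete") relaxation of sampling
# from Bernoulli(x): Gumbel noise -log(-log(u)) is added to log(x) and to
# log(1 - x), both are divided by the current temperature cur_T, and a 2-way
# softmax over the stacked pair gives a soft mask; only the first channel
# (the "on" probability) is returned.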
x = (x + self.p_value).log()
r = (r + self.p_value).log()
# Generate Noise
x_N = torch.rand_like(x)
r_N = torch.rand_like(r)
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
# Get Final Distribution
x = x + x_N
x = x / (self.cur_T + self.p_value)
r = r + r_N
r = r / (self.cur_T + self.p_value)
x = torch.cat((x, r), dim=1)
x = self.softmax(x)
x = x[:, [0], :, :]
if self.training:
self.cur_T = self.cur_T * self.decay_alpha
# if self.cur_T < 0.5 or not self.training:
# print('cur_T:{0}'.format(self.cur_T))
# self.step += 1
# if self.step % 50 == 0:
# print('cur_T:{0}'.format(self.cur_T))
#
return x
if __name__ == '__main__':
pass
# ToDo: Test Code Here.
# _test_T = 0.6
# Block = GumbelSigmoid(_test_T, 1.0)
| [
"[email protected]"
] | |
d8e6d6bc745881e200737675ec2cd28b084d364d | 68c003a526414fef3c23ad591982f1113ca8a72c | /api/urls.py | 6287d8ae58d870352565ce7f626f9a3aa7037130 | [] | no_license | pawanpaudel93/NepAmbulance | 9d99ef3a3592b3a17091889d9db32aa952974400 | b07dba43926c3f5a350b0acd75ac90b4842e3e32 | refs/heads/master | 2020-06-14T08:59:03.523102 | 2020-01-07T09:05:03 | 2020-01-07T09:05:03 | 194,965,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | from django.contrib import admin
from django.urls import path
from .views import ListCreateAmbulance, RetrieveUpdateDeleteAmbulance, ListDistrict, ListProvince
urlpatterns = [
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/', ListCreateAmbulance.as_view(), name="list-create-api"),
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/<int:pk>/', RetrieveUpdateDeleteAmbulance.as_view()),
# path('get/wards/<slug:city>/', ListWard.as_view(), name="get-wards"),
# path('get/cities/<slug:district>/', ListCity.as_view(), name='get-cities'),
path('get/districts/<slug:province>/', ListDistrict.as_view(), name='get-districts'),
path('get/provinces/', ListProvince.as_view(), name='get-provinces'),
] | [
"[email protected]"
] | |
a25e040005de4ab4ceb6b75d24ad6378699d31d5 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/xt.py | 2cabdcd454cba0a0dfcd2847512439922cc7dc0c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'XT':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
6810448a2a2f895bb4d8c9a6ddda997f4967d5d2 | 99b8b8f06f2248a8ef940c0b5ba90d05f0362ba0 | /src/python/strelka/scanners/scan_pe.py | 626e9df031e01b48ea3c146b00d52c99f1d0d331 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | thezedwards/strelka | b5d794198791f04a9473ae4b7b2f8a75b7ccac9b | 9791ec50354459b4c80df6e95887e0d6bd58729a | refs/heads/master | 2020-05-24T12:34:15.926932 | 2019-05-16T20:51:40 | 2019-05-16T20:51:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | import binascii
from datetime import datetime
import hashlib
import struct
import pefile
from strelka import strelka
IMAGE_MAGIC_LOOKUP = {
0x10b: '32_BIT',
0x20b: '64_BIT',
0x107: 'ROM_IMAGE',
}
class ScanPe(strelka.Scanner):
"""Collects metadata from PE files."""
def scan(self, data, file, options, expire_at):
self.event['total'] = {'sections': 0}
try:
pe = pefile.PE(data=data)
pe_dict = pe.dump_dict()
self.event['total']['sections'] = pe.FILE_HEADER.NumberOfSections
self.event['warnings'] = pe.get_warnings()
self.event['timestamp'] = datetime.utcfromtimestamp(pe.FILE_HEADER.TimeDateStamp).isoformat()
machine = pe.FILE_HEADER.Machine
self.event['machine'] = {
'id': machine,
'type': pefile.MACHINE_TYPE.get(machine),
}
# Reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339%28v=vs.85%29.aspx
self.event['image_magic'] = IMAGE_MAGIC_LOOKUP.get(pe.OPTIONAL_HEADER.Magic, 'Unknown')
subsystem = pe.OPTIONAL_HEADER.Subsystem
self.event['subsystem'] = pefile.SUBSYSTEM_TYPE.get(subsystem)
self.event['stack_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
self.event['stack_commit_size'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
self.event['heap_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
self.event['heap_commit_size'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
self.event['image_base'] = pe.OPTIONAL_HEADER.ImageBase
self.event['entry_point'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
self.event['image_characteristics'] = pe_dict.get('Flags')
self.event['dll_characteristics'] = pe_dict.get('DllCharacteristics')
try:
self.event['imphash'] = pe.get_imphash()
except AttributeError:
self.flags.append('no_imphash')
self.event.setdefault('export_functions', [])
export_symbols = pe_dict.get('Exported symbols', [])
for symbols in export_symbols:
name = symbols.get('Name')
if name is not None and isinstance(name, bytes) and name not in self.event['export_functions']:
self.event['export_functions'].append(name)
import_cache = {}
self.event.setdefault('imports', [])
import_symbols = pe_dict.get('Imported symbols', [])
for symbol in import_symbols:
for import_ in symbol:
dll = import_.get('DLL')
if dll is not None:
if dll not in self.event['imports']:
self.event['imports'].append(dll)
import_cache.setdefault(dll, [])
ordinal = import_.get('Ordinal')
if ordinal is not None:
ordinal = pefile.ordlookup.ordLookup(dll.lower(), ordinal, make_name=True)
import_cache[dll].append(ordinal)
name = import_.get('Name')
if name is not None:
import_cache[dll].append(name)
self.event.setdefault('import_functions', [])
for (import_, functions) in import_cache.items():
import_entry = {'import': import_, 'functions': functions}
if import_entry not in self.event['import_functions']:
self.event['import_functions'].append(import_entry)
self.event.setdefault('resources', [])
try:
for resource in pe.DIRECTORY_ENTRY_RESOURCE.entries:
res_type = pefile.RESOURCE_TYPE.get(resource.id, 'Unknown')
for entry in resource.directory.entries:
for e_entry in entry.directory.entries:
sublang = pefile.get_sublang_name_for_lang(
e_entry.data.lang,
e_entry.data.sublang,
)
offset = e_entry.data.struct.OffsetToData
size = e_entry.data.struct.Size
r_data = pe.get_data(offset, size)
language = pefile.LANG.get(e_entry.data.lang, 'Unknown')
data = {
'type': res_type,
'id': e_entry.id,
'name': e_entry.data.struct.name,
'offset': offset,
'size': size,
'sha256': hashlib.sha256(r_data).hexdigest(),
'sha1': hashlib.sha1(r_data).hexdigest(),
'md5': hashlib.md5(r_data).hexdigest(),
'language': language,
'sub_language': sublang,
}
if data not in self.event['resources']:
self.event['resources'].append(data)
except AttributeError:
self.flags.append('no_resources')
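# The debug directory is scanned for CodeView records: 'RSDS' (PDB 7.0) carries
# a 16-byte GUID, a 4-byte age and the PDB path; 'NB10' (PDB 2.0) carries a
# 4-byte timestamp, a 4-byte age and the PDB path.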
if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):
debug = dict()
for e in pe.DIRECTORY_ENTRY_DEBUG:
rawData = pe.get_data(e.struct.AddressOfRawData, e.struct.SizeOfData)
if rawData.find(b'RSDS') != -1 and len(rawData) > 24:
pdb = rawData[rawData.find(b'RSDS'):]
debug['guid'] = b'%s-%s-%s-%s' % (
binascii.hexlify(pdb[4:8]),
binascii.hexlify(pdb[8:10]),
binascii.hexlify(pdb[10:12]),
binascii.hexlify(pdb[12:20]),
)
debug['age'] = struct.unpack('<L', pdb[20:24])[0]
debug['pdb'] = pdb[24:].rstrip(b'\x00')
self.event['rsds'] = debug
elif rawData.find(b'NB10') != -1 and len(rawData) > 16:
pdb = rawData[rawData.find(b'NB10') + 8:]
debug['created'] = struct.unpack('<L', pdb[0:4])[0]
debug['age'] = struct.unpack('<L', pdb[4:8])[0]
debug['pdb'] = pdb[8:].rstrip(b'\x00')
self.event['nb10'] = debug
self.event.setdefault('sections', [])
sections = pe_dict.get('PE Sections', [])
for section in sections:
section_entry = {
'name': section.get('Name', {}).get('Value', '').replace('\\x00', ''),
'flags': section.get('Flags', []),
'structure': section.get('Structure', ''),
}
if section_entry not in self.event['sections']:
self.event['sections'].append(section_entry)
security = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']]
digital_signature_virtual_address = security.VirtualAddress
if security.Size > 0:
extract_data = pe.write()[digital_signature_virtual_address + 8:]
if len(extract_data) > 0:
self.flags.append('signed')
extract_file = strelka.File(
name='digital_signature',
source=self.name,
)
for c in strelka.chunk_string(extract_data):
self.upload_to_cache(
extract_file.pointer,
c,
expire_at,
)
self.files.append(extract_file)
else:
self.flags.append('empty_signature')
if hasattr(pe, 'FileInfo'):
self.event.setdefault('version_info', [])
for structure in pe.FileInfo:
for fileinfo in structure:
if fileinfo.Key.decode() == 'StringFileInfo':
for block in fileinfo.StringTable:
for name, value in block.entries.items():
fixedinfo = {
'name': name.decode(),
'value': value.decode(),
}
if fixedinfo not in self.event['version_info']:
self.event['version_info'].append(fixedinfo)
else:
self.flags.append('no_version_info')
except IndexError:
self.flags.append('index_error')
except pefile.PEFormatError:
self.flags.append('pe_format_error')
| [
"[email protected]"
] | |
e9a1e970d4704ef0445f93aed0cd5162806488f7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03273/s702731643.py | a626a36c61e3c295dfc6c90d75e2a4adb265c98f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from collections import defaultdict
import itertools
import copy
def readInt():
return int(input())
def readInts():
return list(map(int, input().split()))
def readChar():
return input()
def readChars():
return input().split()
def p(arr,b="\n",e="\n"):
print(b,end="")
for i in arr:
for j in i:
print(j,end="")
print()
print(e,end="")
h,w = readInts()
a = [list(input()) for i in range(h)]
for i in range(h-1,-1,-1):
boo = 1
for j in range(w-1,-1,-1):
if a[i][j]=="#":
boo = 0
if boo==1:
del a[i]
for i in range(len(a[0])-1,-1,-1):
boo = 1
for j in range(len(a)-1,-1,-1):
if a[j][i]=="#":
boo = 0
if boo==1:
for j in range(len(a)-1,-1,-1):
del a[j][i]
p(a,b="",e="") | [
"[email protected]"
] | |
b041d27ad67048f098504f32a777272c2c0183c7 | 99218b477267dafe4b9e37a17df7f1cd7af28c78 | /fynd_test/wsgi.py | 23222bda9e292bc12fb0264613d4a79352997fdc | [] | no_license | nattesharan/fynd_test | da3367641cdd9d46220ba881680c7b809539d006 | 45d21e509379bb643630e374f9a31a9454f9f746 | refs/heads/master | 2022-12-01T17:49:38.299113 | 2020-08-16T16:19:23 | 2020-08-16T16:19:23 | 287,949,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for fynd_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fynd_test.settings')
application = get_wsgi_application()
| [
"[email protected]"
] |