blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3 to 616 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0 to 112 items) | license_type (string, 2 classes) | repo_name (string, 5 to 115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, 3 to 10.2M chars) | authors (sequence, 1 item) | author_id (string, 1 to 132 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07d70ae42492f6ef1d0c9f70c89b683116d2d1fe | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/KoubeiMerchantKbcloudSubuserinfoQueryRequest.py | 25b94a0a4cf762b874f9cb73885384e42136d8d1 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 4,021 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMerchantKbcloudSubuserinfoQueryModel import KoubeiMerchantKbcloudSubuserinfoQueryModel
class KoubeiMerchantKbcloudSubuserinfoQueryRequest(object):
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        if isinstance(value, KoubeiMerchantKbcloudSubuserinfoQueryModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiMerchantKbcloudSubuserinfoQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        params = dict()
        params[P_METHOD] = 'koubei.merchant.kbcloud.subuserinfo.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        multipart_params = dict()
        return multipart_params
| [
"[email protected]"
] | |
6d5689b96edd16de7af3d2cdb8ee31be61120d55 | dcbb531eada723b717cf7243fbeac6d3738007b4 | /chapter3/BX-CSV-Dump/users.py | ba7426d264ec460afc5d144cd1afc3500153ad3b | [] | no_license | yangtao0304/recommendation-system | 14a023a57d38a2450d44467bb85c441bd067e8f9 | 995b93ed0fd146d5bb6d837055b8e150a8b145c7 | refs/heads/master | 2020-09-12T05:56:00.173486 | 2020-03-10T01:24:28 | 2020-03-10T01:24:28 | 222,332,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import pandas as pd
file_path = 'BX-Users.csv'
users = pd.read_table(file_path, sep=';', header=0, encoding='ISO-8859-1')
print('First 5 rows:\n{}\n'.format(users.head()))
print('Total number of records:\n{}\n'.format(users.count()))
print('Age range: <{},{}>'.format(users['Age'].min(), users['Age'].max()))
'''
Total number of records:
User-ID 278858
Location 278858
Age 168096
Age range: <0.0,244.0>
'''
# In the Age column, pandas reads NULL values as NaN
# The minimum and maximum ages are clearly invalid
# Two ways to handle this: 1. random values within a realistic range; 2. fill with the mean
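# A minimal sketch of option 2 (mean fill). The 5..100 validity bounds below are an
# illustrative assumption, not taken from the original notes:
valid_age = users['Age'].where(users['Age'].between(5, 100))
users['Age'] = valid_age.fillna(valid_age.mean())
print('Age range after cleaning: <{},{}>'.format(users['Age'].min(), users['Age'].max()))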
| [
"[email protected]"
] | |
06017e09936000545346137f186f35e3dd4590ef | a1aba83b90285def84cc425c0b089dd632a01a51 | /py千峰/day13线程与协程/xiecheng03.py | 8ba4d3827ffd07f072a48671005c6c1fcbd1b612 | [] | no_license | 15929134544/wangwang | 8ada14acb505576f07f01e37c936500ee95573a0 | 47f9abbf46f8d3cbc0698cb64c043735b06940d4 | refs/heads/master | 2023-05-11T19:59:54.462454 | 2021-05-25T15:19:43 | 2021-05-25T15:19:43 | 328,119,916 | 1 | 1 | null | 2021-05-11T16:13:18 | 2021-01-09T09:33:29 | JavaScript | UTF-8 | Python | false | false | 1,285 | py | """
greenlet already implements coroutines, but the switching there is done by hand, which
quickly becomes tedious. Don't worry: Python has a module that is more powerful than
greenlet and can switch tasks automatically: gevent.
The idea is that when a greenlet hits an IO operation (input/output, e.g. network or file
access), such as a network request, gevent automatically switches to another greenlet and,
once the IO has completed, switches back at a suitable moment to continue execution.
Because IO operations are very time-consuming and often leave the program waiting, gevent's
automatic switching guarantees that some greenlet is always running instead of waiting on IO.
"""
import time
import gevent
from greenlet import greenlet
from gevent import monkey

monkey.patch_all()  # apply the monkey patch


def a():  # task A
    for i in range(5):
        print('A' + str(i))
        # gb.switch()  # switch
        time.sleep(0.1)


def b():  # task B
    for i in range(5):
        print('B' + str(i))
        # gc.switch()
        time.sleep(0.1)


def c():  # task C
    for i in range(5):
        print('C' + str(i))
        # ga.switch()
        time.sleep(0.1)


if __name__ == '__main__':
    g1 = gevent.spawn(a)
    g2 = gevent.spawn(b)
    g3 = gevent.spawn(c)
    g1.join()
    g2.join()
    g3.join()
    print('---------------') | [
"[email protected]"
] | |
64f802ee3da662f7515a4b931b1bd80bc895e282 | e2992e19ebc728387125a70c72a702a076de7a12 | /Python/01_My_Programs_Hv/05_List/102_C5_E3.py | 20429dcf098b179f726d90ec28f04fadd4ca8fe1 | [] | no_license | harsh1915/Machine_Learning | c9c32ed07df3b2648f7796f004ebb38726f13ae4 | c68a973cfbc6c60eeb94e253c6f2ce34baa3686e | refs/heads/main | 2023-08-27T15:01:16.430869 | 2021-11-15T07:53:36 | 2021-11-15T07:53:36 | 377,694,941 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | ls= ["abc", "def", "ghi"]
print(ls[0][::-1])


def list_reverse(ls):
    ls1 = []
    for i in ls:
        ls1.append(i[::-1])
    return ls1


print(list_reverse(ls)) | [
"“[email protected]”"
] | |
a18d86d09a8f17900f98f2b1c6064003b6ee5ec0 | 50e10e8f304d32329ba88aa3fa8f8250c0a6a84d | /standard/girc.py | 594043511c56131f646724eb2d265123d12a8728 | [
"Apache-2.0"
] | permissive | candeira/duxlot | 0a1b4468e1d93f3db90219ea21d45a8e494aaabb | 69f4234e14ac8ad1ef53a0d663a7240d6e321e46 | refs/heads/master | 2021-01-20T04:26:10.588945 | 2012-09-13T17:00:18 | 2012-09-13T17:00:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | # Copyright 2012, Sean B. Palmer
# Code at http://inamidst.com/duxlot/
# Apache License 2.0
# @@ this can't be named irc.py
import duxlot
# Save PEP 3122!
if "." in __name__:
from . import api
else:
import api
command = duxlot.command
# @@ ask, not tell yourself
# IRC
@command
def ask(env):
"Ask another user an enquiry"
if not env.arg:
return env.reply(ask.__doc__)
env.verb = "ask"
to(env)
# IRC
@command
def parsed_message(env):
"Show parsed input message"
env.reply(repr(env.message))
# IRC
@command
def schedule(env):
"Schedule an event"
# @@ database.timezones
if not env.arg:
return env.reply(schedule.__doc__)
t, text = env.arg.split(" ", 1)
t = float(t)
env.schedule((t, env.sender, env.nick, text))
env.reply("Scheduled")
# @@ test to make sure the right time is given!
# IRC
@command
def seen(env):
"Find out whether somebody has been around recently"
if not env.arg:
return env.say(seen.__doc__)
if env.arg == env.options["nick"]:
return env.reply("I'm right here")
# env.database.seen.get.verb.verb.verb
result = env.database.cache.seen.get(env.arg)
if not result:
env.say("Haven't seen %s" % env.arg)
else:
unixtime, place = result
offset, abbreviation = zone_from_nick(env, env.nick)
dt = api.clock.format_datetime(
format="%Y-%m-%d %H:%M:%S $TZ",
offset=offset,
tz=abbreviation,
unixtime=unixtime
)
env.say("On %s at %s" % (place, dt))
# IRC
# @@ a check that commands are covered here
@command
def stats(env):
"Display information about the most used commands"
usage = env.database.cache.usage
usage = sorted(((b, a) for (a, b) in usage.items()), reverse=True)
usage = list(usage)[:10]
usage = ["%s (%s)" % (b, a) for (a, b) in usage]
env.reply("Top used commands: " + ", ".join(usage))
# IRC
@command
def tell(env):
"Tell another user a message"
# Inspired by Monty, by Paul Mutton
# http://www.jibble.org/
if not env.arg:
return env.reply(tell.__doc__)
env.verb = "tell"
to(env)
# IRC
@command
def timezone(env):
"Set the user's timezone to an IANA Time Zone Database value"
tz = env.database.cache.timezones.get(env.nick, None)
if not env.arg:
if tz:
return env.reply("Your timezone is currently set to %s" % tz)
else:
return env.reply("You do not currently have a timezone set")
if env.arg in {"None", "-", "delete", "remove", "unset"}:
if tz is None:
return env.reply("You do not current have a timezone set")
with env.database.context("timezones") as timezones:
del timezones[env.nick]
return env.reply("Your timezone has been un-set")
if env.arg in {"geo", "guess"}:
zonename = api.geo.timezone_info(
address=env.message["prefix"]["host"]
).zone
else:
zonename = env.arg
import os.path
zoneinfo = env.options["zoneinfo"]
zonefile = os.path.join(zoneinfo, zonename)
try: opt = api.clock.zoneinfo_offset(filename=zonefile)
except Exception:
env.reply("Unrecognised zone. Try using one of the TZ fields here:")
env.reply("http://en.wikipedia.org/wiki/List_of_tz_database_time_zones")
else:
tz = round(opt.offset, 2)
with env.database.context("timezones") as timezones:
timezones[env.nick] = zonename
# message = "Set your zone to %s, which is currently %s (%s)"
message = "Set your TZ to %s; currently %s (UTC %s)"
hours = round(tz / 3600, 3)
hours = "+" + str(hours) if (hours >=0) else str(hours)
hours = hours.rstrip("0").rstrip(".")
env.reply(message % (zonename, opt.abbreviation, hours))
# @@ check nickname sanity
# IRC
@command
def to(env):
"Send a message to another user"
if not env.arg:
return env.reply(to.__doc__)
import time
# could be partly moved to api?
recipient, message = env.arg.split(" ", 1)
# check syntax of env.nick!
# "self!" syntax to force a message to self
if env.nick == recipient:
return env.reply("You can tell yourself that")
if env.options["nick"] == recipient:
return env.reply("Understood")
if not hasattr(env, "verb"):
env.verb = None
# @@ check nick format
item = (int(time.time()), env.nick, env.verb, recipient, message)
with env.database.context("messages") as messages:
messages.setdefault(recipient, [])
messages[recipient].append(item)
env.reply("Will pass your message to %s" % recipient)
| [
"[email protected]"
] | |
5262ad751574f1650ce9fde9ee1b73565b930cb2 | d7379fa682e25d1d40b93b61dfe7c1fc2a64e0ff | /test/test_variables.py | fb481be5d642768a394481a1a887f86acd895855 | [
"Apache-2.0"
] | permissive | renuacpro/unit | f7b00cfc059b1ff9298824ead28b1ac404b86ff0 | 22c88f0253d57756ad541326df09d1398a871708 | refs/heads/master | 2022-12-10T08:27:15.371966 | 2020-09-07T12:21:14 | 2020-09-07T12:21:14 | 293,599,216 | 2 | 0 | null | 2020-09-07T18:08:47 | 2020-09-07T18:08:47 | null | UTF-8 | Python | false | false | 3,888 | py | from unit.applications.proto import TestApplicationProto
class TestVariables(TestApplicationProto):
prerequisites = {}
def setUp(self):
super().setUp()
self.assertIn(
'success',
self.conf(
{
"listeners": {"*:7080": {"pass": "routes/$method"}},
"routes": {
"GET": [{"action": {"return": 201}}],
"POST": [{"action": {"return": 202}}],
"3": [{"action": {"return": 203}}],
"4*": [{"action": {"return": 204}}],
"blahGET}": [{"action": {"return": 205}}],
"5GET": [{"action": {"return": 206}}],
"GETGET": [{"action": {"return": 207}}],
"localhost": [{"action": {"return": 208}}],
},
},
),
'configure routes',
)
def conf_routes(self, routes):
self.assertIn('success', self.conf(routes, 'listeners/*:7080/pass'))
def test_variables_method(self):
self.assertEqual(self.get()['status'], 201, 'method GET')
self.assertEqual(self.post()['status'], 202, 'method POST')
def test_variables_uri(self):
self.conf_routes("\"routes$uri\"")
self.assertEqual(self.get(url='/3')['status'], 203, 'uri')
self.assertEqual(self.get(url='/4*')['status'], 204, 'uri 2')
self.assertEqual(self.get(url='/4%2A')['status'], 204, 'uri 3')
def test_variables_host(self):
self.conf_routes("\"routes/$host\"")
def check_host(host, status=208):
self.assertEqual(
self.get(headers={'Host': host, 'Connection': 'close'})[
'status'
],
status,
)
check_host('localhost')
check_host('localhost.')
check_host('localhost:7080')
check_host('.localhost', 404)
check_host('www.localhost', 404)
check_host('localhost1', 404)
def test_variables_many(self):
self.conf_routes("\"routes$uri$method\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many')
self.conf_routes("\"routes${uri}${method}\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many 2')
self.conf_routes("\"routes${uri}$method\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many 3')
self.conf_routes("\"routes/$method$method\"")
self.assertEqual(self.get()['status'], 207, 'many 4')
self.conf_routes("\"routes/$method$uri\"")
self.assertEqual(self.get()['status'], 404, 'no route')
self.assertEqual(self.get(url='/blah')['status'], 404, 'no route 2')
def test_variables_replace(self):
self.assertEqual(self.get()['status'], 201)
self.conf_routes("\"routes$uri\"")
self.assertEqual(self.get(url='/3')['status'], 203)
self.conf_routes("\"routes/${method}\"")
self.assertEqual(self.post()['status'], 202)
self.conf_routes("\"routes${uri}\"")
self.assertEqual(self.get(url='/4*')['status'], 204)
self.conf_routes("\"routes/blah$method}\"")
self.assertEqual(self.get()['status'], 205)
def test_variables_invalid(self):
def check_variables(routes):
self.assertIn(
'error',
self.conf(routes, 'listeners/*:7080/pass'),
'invalid variables',
)
check_variables("\"routes$\"")
check_variables("\"routes${\"")
check_variables("\"routes${}\"")
check_variables("\"routes$ur\"")
check_variables("\"routes$uriblah\"")
check_variables("\"routes${uri\"")
check_variables("\"routes${{uri}\"")
if __name__ == '__main__':
TestVariables.main()
| [
"[email protected]"
] | |
7e96ded78edf879fd044bae181c6553700ee19a1 | 3db9ef78b62b01bf79dff6671b02c24192cd4648 | /13/8.py | b0c91ec8d0d5b114b03beb2ee22681599281cb1e | [] | no_license | rheehot/python-for-coding-test | 401f5655af1a8cf20bc86edb1635bdc4a9e88e52 | be95a0d0b3191bb21eab1075953fa472f4102351 | refs/heads/master | 2022-11-11T19:35:56.680749 | 2020-06-24T02:19:48 | 2020-06-24T02:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | from collections import deque
def get_next_pos(pos, board):
    next_pos = []  # result to return (positions the robot can move to)
    pos = list(pos)  # current position
    pos1_x, pos1_y, pos2_x, pos2_y = pos[0][0], pos[0][1], pos[1][0], pos[1][1]
    # handle moves in the four directions (up, down, left, right)
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    for i in range(4):
        pos1_next_x, pos1_next_y, pos2_next_x, pos2_next_y = pos1_x + dx[i], pos1_y + dy[i], pos2_x + dx[i], pos2_y + dy[i]
        # if both destination cells are empty
        if board[pos1_next_x][pos1_next_y] == 0 and board[pos2_next_x][pos2_next_y] == 0:
            next_pos.append({(pos1_next_x, pos1_next_y), (pos2_next_x, pos2_next_y)})
    # if the robot currently lies horizontally
    if pos1_x == pos2_x:
        for i in [-1, 1]:  # rotate upwards or downwards
            if board[pos1_x + i][pos1_y] == 0 and board[pos2_x + i][pos2_y] == 0:  # the two cells above or below are both empty
                next_pos.append({(pos1_x, pos1_y), (pos1_x + i, pos1_y)})
                next_pos.append({(pos2_x, pos2_y), (pos2_x + i, pos2_y)})
    # if the robot currently lies vertically
    elif pos1_y == pos2_y:
        for i in [-1, 1]:  # rotate to the left or to the right
            if board[pos1_x][pos1_y + i] == 0 and board[pos2_x][pos2_y + i] == 0:  # the two cells to the left or right are both empty
                next_pos.append({(pos1_x, pos1_y), (pos1_x, pos1_y + i)})
                next_pos.append({(pos2_x, pos2_y), (pos2_x, pos2_y + i)})
    # return the positions reachable from the current position
    return next_pos


def solution(board):
    # transform the map by placing a wall around the border
    n = len(board)
    new_board = [[1] * (n + 2) for _ in range(n + 2)]
    for i in range(n):
        for j in range(n):
            new_board[i + 1][j + 1] = board[i][j]
    # perform a breadth-first search (BFS)
    q = deque()
    visited = []
    pos = {(1, 1), (1, 2)}  # starting position
    q.append((pos, 0))  # enqueue it and
    visited.append(pos)  # mark it as visited
    # repeat until the queue is empty
    while q:
        pos, cost = q.popleft()
        # if the robot has reached (n, n), this is the shortest distance, so return it
        if (n, n) in pos:
            return cost
        # check the positions reachable from the current position
        for next_pos in get_next_pos(pos, new_board):
            # if a position has not been visited yet, enqueue it and mark it as visited
            if next_pos not in visited:
                q.append((next_pos, cost + 1))
                visited.append(next_pos)
    return 0
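# Usage sketch (illustrative 3x3 all-empty board, not from the original source; 0 = empty cell, 1 = wall):
# solution([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) -> 3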
| [
"[email protected]"
] | |
43fceb1cbee1e30cbb8565be49c40ba5a3866b44 | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/shortdata/region_TR.py | 4840c1a7757ec99155ac8ae581af9b61d4516426 | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | """Auto-generated file, do not edit by hand. TR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TR = PhoneMetadata(id='TR', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
    toll_free=PhoneNumberDesc(),
    premium_rate=PhoneNumberDesc(),
    emergency=PhoneNumberDesc(national_number_pattern='1(?:1[02]|55)', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='1(?:1[02]|55)', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
    standard_rate=PhoneNumberDesc(),
    carrier_specific=PhoneNumberDesc(),
    short_data=True)
| [
"[email protected]"
] | |
9026e69e8f119456f9e40a29da8f7c7d3ef7372b | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_BestCycle_MLP.py | 5274eb0449dc0222445102838746fbe7b7badd4e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 154 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['BestCycle'] , ['MLP'] ); | [
"[email protected]"
] | |
36fa9338504116911b5efc2f47a261d074edb8a3 | 3cd4902b67de144d8e6f36335e125d0548d8cf97 | /submissions/runs/RUN10_vc_extended_model_img_unsorted.py | 129ab86ee4ee8a26ac6546af8dd14261d13a222a | [
"MIT"
] | permissive | stefantaubert/imageclef-lifelog-2019 | 5d201c2a28f15f608b9b58b94ab2ecddb5201205 | ad49dc79db98a163c5bc282fb179c0f7730546b3 | refs/heads/master | 2022-10-06T12:42:30.011610 | 2022-08-29T13:35:09 | 2022-08-29T13:35:09 | 196,553,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | from src.models.pooling.Model_opts import *
from src.data.RawPlaces365Data import name_raw_places
from src.data.IndoorOutdoorData import name_io
from src.data.CocoYoloData import name_yolo
from src.data.CocoDetectronData import name_detectron
from src.data.CocoDefaultData import name_coco_default
from src.data.OpenImagesData import name_oi
from src.data.ImageNetData import name_imagenet
from src.data.SUNattributesData import name_sun
from submissions.runs.run_base import run_on_dev
from submissions.runs.run_base import run_on_test
opts = {
opt_model: {
opt_use_seg: False,
opt_subm_imgs_per_day: 0,
opt_subm_imgs_per_day_only_on_recall: False,
opt_comp_method: comp_method_datamax,
opt_comp_use_weights: True,
opt_query_src: query_src_title,
opt_use_tokenclustering: False,
opt_optimize_labels: True,
},
opt_data: {
name_coco_default: {
opt_weight: 1,
opt_threshold: 0.9,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_detectron: {
opt_weight: 1,
opt_threshold: 0.95,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_yolo: {
opt_weight: 1,
opt_threshold: 0.9,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_imagenet: {
opt_weight: 1,
opt_threshold: 0.99,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_oi: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: True,
opt_idf_boosting_threshold: 0.5,
opt_intensify_factor_m: 2,
opt_intensify_factor_p: 2,
opt_ceiling: True,
},
name_raw_places: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: True,
},
name_io: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 3,
opt_intensify_factor_p: 3,
opt_ceiling: False,
},
name_sun: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
},
}
if __name__ == "__main__":
    run_on_dev(opts)
    run_on_test(opts)
| [
"[email protected]"
] | |
99fbbf8071ba11b6ce828063c78654215208e339 | bede13ba6e7f8c2750815df29bb2217228e91ca5 | /medical_lab_management/__manifest__.py | 01ea6d84e8879c00ab859c47d9b8fa1631145e57 | [] | no_license | CybroOdoo/CybroAddons | f44c1c43df1aad348409924603e538aa3abc7319 | 4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14 | refs/heads/16.0 | 2023-09-01T17:52:04.418982 | 2023-09-01T11:43:47 | 2023-09-01T11:43:47 | 47,947,919 | 209 | 561 | null | 2023-09-14T01:47:59 | 2015-12-14T02:38:57 | HTML | UTF-8 | Python | false | false | 2,048 | py | # -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2021-TODAY Cybrosys Technologies(<https://www.cybrosys.com>).
#
# You can modify it under the terms of the GNU AFFERO
# GENERAL PUBLIC LICENSE (AGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE (AGPL v3) for more details.
#
# You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
# (AGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
{
'name': "Medical Lab Management",
'version': '16.0.1.1.0',
'summary': """Manage Medical Lab Operations.""",
'description': """Manage Medical Lab General Operations, Odoo15, Odoo 15""",
'author': "Cybrosys Techno Solutions",
'maintainer': 'Cybrosys Techno Solutions',
'company': "Cybrosys Techno Solutions",
'website': "https://www.cybrosys.com",
'category': 'Industries',
'depends': ['base', 'mail', 'account', 'contacts'],
'data': [
'security/lab_users.xml',
'security/ir.model.access.csv',
'views/res_partner.xml',
'views/lab_patient_view.xml',
'views/test_unit_view.xml',
'views/lab_test_type.xml',
'views/lab_test_content_type.xml',
'views/physician_specialty.xml',
'views/physician_details.xml',
'views/lab_request.xml',
'views/lab_appointment.xml',
'views/account_invoice.xml',
'report/report.xml',
'report/lab_test_report.xml',
'report/lab_patient_card.xml',
],
'images': ['static/description/banner.png'],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
'application': True,
}
| [
"[email protected]"
] | |
2d48271b9fc70a4e9d62124e31981289ac41c030 | cfb373af248f1f24124194913a52d395e6b826e7 | /recruitment_plus/config/docs.py | e2d3da882af7144d3fec38727c269c5516b501da | [
"MIT"
] | permissive | leaftechnology/recruitment-plus | 616da8e1b9fc405d431e3e20559f55c2b5e78981 | 505478a9d4299b18089dba41a86d7ab3b4907289 | refs/heads/master | 2023-04-02T13:50:52.135805 | 2021-04-12T13:29:24 | 2021-04-12T13:29:24 | 328,859,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/recruitment_plus"
# docs_base_url = "https://[org_name].github.io/recruitment_plus"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    context.brand_html = "Recruitment Plus"
| [
"[email protected]"
] | |
a81fbbd2f5f2f89caa41421f4da4cedacd4fe732 | bc441bb06b8948288f110af63feda4e798f30225 | /staff_manage_sdk/model/metadata_center/stream_aggregate_states_pb2.py | e1c387bf61b3d82a52c66f128f07bb8158289ee0 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,721 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_aggregate_states.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from staff_manage_sdk.model.metadata_center import stream_aggregate_rule_pb2 as staff__manage__sdk_dot_model_dot_metadata__center_dot_stream__aggregate__rule__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stream_aggregate_states.proto',
package='metadata_center',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
serialized_pb=_b('\n\x1dstream_aggregate_states.proto\x12\x0fmetadata_center\x1a\x42staff_manage_sdk/model/metadata_center/stream_aggregate_rule.proto\"l\n\x15StreamAggregateStates\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x35\n\x07payload\x18\x03 \x03(\x0b\x32$.metadata_center.StreamAggregateRuleBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
,
dependencies=[staff__manage__sdk_dot_model_dot_metadata__center_dot_stream__aggregate__rule__pb2.DESCRIPTOR,])
_STREAMAGGREGATESTATES = _descriptor.Descriptor(
name='StreamAggregateStates',
full_name='metadata_center.StreamAggregateStates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org', full_name='metadata_center.StreamAggregateStates.org', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='metadata_center.StreamAggregateStates.command', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='metadata_center.StreamAggregateStates.payload', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=226,
)
_STREAMAGGREGATESTATES.fields_by_name['payload'].message_type = staff__manage__sdk_dot_model_dot_metadata__center_dot_stream__aggregate__rule__pb2._STREAMAGGREGATERULE
DESCRIPTOR.message_types_by_name['StreamAggregateStates'] = _STREAMAGGREGATESTATES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StreamAggregateStates = _reflection.GeneratedProtocolMessageType('StreamAggregateStates', (_message.Message,), {
'DESCRIPTOR' : _STREAMAGGREGATESTATES,
'__module__' : 'stream_aggregate_states_pb2'
# @@protoc_insertion_point(class_scope:metadata_center.StreamAggregateStates)
})
_sym_db.RegisterMessage(StreamAggregateStates)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
8351589ff5cf619e24e9651f2c6e06360a29a3d5 | 0580861bd8b993ac92faec0ed88a339975d702c0 | /reagent/model_managers/discrete_dqn_base.py | ea825859334f6a14b3a64a0e0ef59b203444de62 | [
"BSD-3-Clause"
] | permissive | Sandy4321/ReAgent | 346094ae4c98121de5c54d504186f583de21daf0 | 0a387c1aeb922d242c705338fae9379becc82814 | refs/heads/master | 2023-07-17T01:27:17.762206 | 2021-08-19T03:15:15 | 2021-08-19T03:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,205 | py | #!/usr/bin/env python3
import abc
import logging
from typing import Dict, List, Optional, Tuple
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
RLParameters,
)
from reagent.data.data_fetcher import DataFetcher
from reagent.data.manual_data_module import ManualDataModule
from reagent.data.reagent_data_module import ReAgentDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import (
GreedyActionSampler,
)
from reagent.gym.policies.scorers.discrete_scorer import discrete_dqn_scorer
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.preprocessing.batch_preprocessor import (
BatchPreprocessor,
DiscreteDqnBatchPreprocessor,
)
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.preprocessing.types import InputColumn
from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter
from reagent.training import ReAgentLightningModule
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
ModelFeatureConfigProvider__Union,
PreprocessingOptions,
ReaderOptions,
ResourceOptions,
RewardOptions,
TableSpec,
)
logger = logging.getLogger(__name__)
@dataclass
class DiscreteDQNBase(ModelManager):
target_action_distribution: Optional[List[float]] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `raw`.
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
preprocessing_options: Optional[PreprocessingOptions] = None
reader_options: Optional[ReaderOptions] = None
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
@property
@abc.abstractmethod
def rl_parameters(self) -> RLParameters:
pass
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create an online DiscreteDQN Policy from env."""
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map),
rl_parameters=self.rl_parameters,
)
else:
sampler = GreedyActionSampler()
# pyre-fixme[6]: Expected `ModelBase` for 1st param but got
# `Union[torch.Tensor, torch.nn.Module]`.
scorer = discrete_dqn_scorer(trainer_module.q_network)
return Policy(scorer=scorer, sampler=sampler)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
def get_state_preprocessing_options(self) -> PreprocessingOptions:
state_preprocessing_options = (
self.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id for ffi in self.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = state_preprocessing_options._replace(
allowedlist_features=state_features
)
return state_preprocessing_options
@property
def multi_steps(self) -> Optional[int]:
return self.rl_parameters.multi_steps
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
reader_options: Optional[ReaderOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
return DiscreteDqnDataModule(
input_table_spec=input_table_spec,
reward_options=reward_options,
setup_data=setup_data,
saved_setup_data=saved_setup_data,
reader_options=reader_options,
resource_options=resource_options,
model_manager=self,
)
def get_reporter(self):
return DiscreteDQNReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
class DiscreteDqnDataModule(ManualDataModule):
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
preprocessing_options = (
self.model_manager.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id
for ffi in self.model_manager.state_feature_config.float_feature_infos
]
logger.info(f"Overriding allowedlist_features: {state_features}")
preprocessing_options = preprocessing_options._replace(
allowedlist_features=state_features
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=identify_normalization_parameters(
input_table_spec, InputColumn.STATE_FEATURES, preprocessing_options
)
)
}
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
return data_fetcher.query_data(
input_table_spec=input_table_spec,
discrete_action=True,
actions=self.model_manager.action_names,
include_possible_actions=True,
sample_range=sample_range,
custom_reward_expression=reward_options.custom_reward_expression,
multi_steps=self.model_manager.multi_steps,
gamma=self.model_manager.rl_parameters.gamma,
)
def build_batch_preprocessor(self) -> BatchPreprocessor:
state_preprocessor = Preprocessor(
self.state_normalization_data.dense_normalization_parameters,
use_gpu=self.resource_options.use_gpu,
)
return DiscreteDqnBatchPreprocessor(
num_actions=len(self.model_manager.action_names),
state_preprocessor=state_preprocessor,
use_gpu=self.resource_options.use_gpu,
)
| [
"[email protected]"
] | |
e280e7b4ce66799e836bba7771e9ef48dfd54688 | 59359e4821554f559c9ffc5bf1a7f52fff0c6051 | /descarteslabs/core/common/client/tests/test_attributes.py | cc396105832fecd35dea1fa023f4f5e890c94ff5 | [
"Apache-2.0"
] | permissive | descarteslabs/descarteslabs-python | 706acfc594721a1087872744c9cb72fe2b3d2e5b | a8a3859b8ced6d4478b93ff205caad06d508501d | refs/heads/master | 2023-08-23T12:01:36.802085 | 2023-08-21T14:57:22 | 2023-08-21T15:20:01 | 84,609,153 | 176 | 49 | NOASSERTION | 2023-05-02T15:54:37 | 2017-03-10T23:27:12 | Python | UTF-8 | Python | false | false | 10,438 | py | # Copyright 2018-2023 Descartes Labs.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime, timezone
import pytz
from .. import Attribute, DatetimeAttribute, Document, DocumentState, ListAttribute
class MyDocument(Document):
id: int = Attribute(int, readonly=True)
name: str = Attribute(str)
local: str = Attribute(str, default="local", sticky=True)
once: int = Attribute(int, mutable=False)
default: datetime = DatetimeAttribute(default=lambda: datetime.utcnow())
created_at: datetime = DatetimeAttribute(readonly=True)
class TestDocument(unittest.TestCase):
def test_attribute(self):
doc = MyDocument(name="testing")
assert doc.name == "testing"
assert doc.state == DocumentState.NEW
def test_default(self):
doc = MyDocument()
assert doc.id is None
assert doc.name is None
assert doc.local == "local"
assert doc.once is None
date = doc.default
assert date is not None
assert doc.default == date
assert doc.created_at is None
def test_modified(self):
doc = MyDocument(name="test")
doc.name = "something new"
assert doc.name == "something new"
assert doc.is_modified
assert doc._modified == {"name"}
doc.name = None
assert doc.is_modified
assert doc._modified == {"name"}
assert doc.name is None
def test_coerce(self):
doc = MyDocument(once="1")
assert doc.once == 1
with self.assertRaises(ValueError) as ctx:
doc = MyDocument(once="1blguoaw")
assert "Unable to assign" in str(ctx.exception)
def test_attribute_immutable(self):
# Should be able to set the value once even if it's None
doc = MyDocument(once=None)
doc.once == 1
doc = MyDocument(once="1")
doc.once == 1
with self.assertRaises(ValueError) as ctx:
doc.once = 2
assert "Unable to set immutable attribute 'once'" == str(ctx.exception)
with self.assertRaises(ValueError) as ctx:
doc.once = None
assert "Unable to set immutable attribute 'once'" == str(ctx.exception)
def test_attribute_readonly(self):
with self.assertRaises(ValueError) as ctx:
MyDocument(id="123")
assert "Unable to set readonly attribute 'id'" == str(ctx.exception)
doc = MyDocument()
with self.assertRaises(ValueError) as ctx:
doc.id = "123"
assert "Unable to set readonly attribute 'id'" == str(ctx.exception)
def test_init_from_server(self):
now = datetime.utcnow()
# 2000-01-01, if set to 0 astimezone on windows in python 3.8 will error
timestamp = 946710000
data = {
"id": 1,
"name": "server",
"local": "server",
"once": 2,
"default": datetime.fromtimestamp(timestamp).isoformat(),
"created_at": now.isoformat(),
"extra": "should be ignored",
}
doc = MyDocument(**data, saved=True)
assert doc.id == 1
assert doc.name == "server"
assert doc.local == "local"
assert doc.once == 2
assert doc.default == datetime.fromtimestamp(timestamp, tz=timezone.utc)
assert doc.created_at == now.replace(tzinfo=timezone.utc)
with self.assertRaises(AttributeError):
doc.extra
def test_set_from_server(self):
now = datetime.utcnow()
doc = MyDocument(name="local", once="1", default=now)
# 2000-01-01, if set to 0 astimezone on windows in python 3.8 will error
timestamp = 946710000
assert doc.once == 1
data = {
"id": 1,
"name": "server",
"local": "server",
"once": 2,
"default": datetime.fromtimestamp(timestamp).isoformat(),
"created_at": now.isoformat(),
}
doc._load_from_remote(data)
assert doc.id == 1
assert doc.name == "server"
assert doc.local == "local"
assert doc.once == 2
assert doc.default == datetime.fromtimestamp(timestamp, tz=timezone.utc)
assert doc.created_at == now.replace(tzinfo=timezone.utc)
def test_to_dict(self):
doc = MyDocument(name="local", once="1")
assert doc.to_dict() == {
"id": None,
"name": "local",
"local": "local",
"once": 1,
"default": doc.default.isoformat(),
"created_at": None,
}
def test_deleted(self):
doc = MyDocument(name="local", once="1")
doc._deleted = True
with self.assertRaises(AttributeError) as ctx:
doc.name
assert "MyDocument has been deleted" == str(ctx.exception)
class TestDatetimeAttribute(unittest.TestCase):
def test_local_time(self):
class TzTest(Document):
date: datetime = DatetimeAttribute(timezone=pytz.timezone("MST"))
now = datetime.utcnow()
doc = TzTest(date=now.isoformat())
assert doc.date.tzinfo == pytz.timezone("MST")
assert doc.date.astimezone(tz=timezone.utc) == now.replace(tzinfo=timezone.utc)
assert doc.to_dict()["date"] == now.replace(tzinfo=timezone.utc).isoformat()
def test_trailing_z(self):
class TrailingTest(Document):
date: datetime = DatetimeAttribute()
now = datetime.utcnow()
doc = TrailingTest(date=now.isoformat() + "Z")
doc.date == now.replace(tzinfo=timezone.utc)
def test_assign_instance(self):
tz = pytz.timezone("MST")
class InstanceTest(Document):
date: datetime = DatetimeAttribute(timezone=tz)
now = datetime.utcnow()
doc = InstanceTest(date=now)
assert doc.date == now.replace(tzinfo=timezone.utc).astimezone(tz=tz)
def test_validation(self):
class ValidationTest(Document):
date: datetime = DatetimeAttribute()
with self.assertRaises(ValueError) as ctx:
doc = ValidationTest(date={})
assert "Expected iso formatted date or unix timestamp" in str(ctx.exception)
now = datetime.utcnow()
doc = ValidationTest(date=now.timestamp())
assert doc.date == now.replace(tzinfo=timezone.utc)
class TestListAttribute(unittest.TestCase):
def test_append(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
doc.items.append(3)
assert doc.items == [1, 2, 3]
assert doc.is_modified
assert doc.to_dict()["items"] == [1, 2, 3]
def test_append_readonly(self):
class ListTest(Document):
items: list = ListAttribute(int, readonly=True)
doc = ListTest(items=[1, 2], saved=True)
with self.assertRaises(ValueError) as ctx:
doc.items.append(3)
assert "Unable to append readonly attribute 'items'" == str(ctx.exception)
assert doc.items == [1, 2]
def test_delete(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
del doc.items[0]
assert doc.items == [2]
assert doc.is_modified
assert doc.to_dict()["items"] == [2]
def test_add_assign(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
doc.items += [3]
assert doc.items == [1, 2, 3]
assert doc.is_modified
assert doc.to_dict()["items"] == [1, 2, 3]
doc._clear_modified()
doc.items += []
assert doc.items == [1, 2, 3]
assert doc.is_modified is False
assert doc.to_dict()["items"] == [1, 2, 3]
def test_clear(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
doc.items.clear()
assert doc.items == []
assert doc.is_modified
assert doc.to_dict()["items"] == []
def test_extend(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
doc.items.extend([3, 4])
assert doc.items == [1, 2, 3, 4]
assert doc.is_modified
assert doc.to_dict()["items"] == [1, 2, 3, 4]
def test_insert(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2], saved=True)
doc.items.insert(0, 0)
assert doc.items == [0, 1, 2]
assert doc.is_modified
assert doc.to_dict()["items"] == [0, 1, 2]
def test_pop(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2, 3], saved=True)
assert doc.items.pop() == 3
assert doc.items == [1, 2]
assert doc.is_modified
assert doc.to_dict()["items"] == [1, 2]
doc._clear_modified()
assert doc.items.pop(0) == 1
assert doc.items == [2]
assert doc.is_modified
assert doc.to_dict()["items"] == [2]
def test_remove(self):
class ListTest(Document):
items: list = ListAttribute(int)
doc = ListTest(items=[1, 2, 3], saved=True)
doc.items.remove(2)
assert doc.items == [1, 3]
assert doc.is_modified
assert doc.to_dict()["items"] == [1, 3]
def test_serializes_type(self):
class ListTest(Document):
items: list = ListAttribute(str)
doc = ListTest(items=[1, 2, 3], saved=True)
assert doc.to_dict()["items"] == ["1", "2", "3"]
doc.items.append(4)
assert doc.is_modified
assert doc.to_dict()["items"] == ["1", "2", "3", "4"]
| [
"[email protected]"
] | |
411d1b5d5d006f9c41b1c82bed003b39f7fba6ac | 27acd9eeb0d2b9b6326cc0477e7dbb84341e265c | /test/vraag4/src/isbn/222.py | fd40dde79d0542bab2d8bd49e8cc487684633488 | [] | no_license | VerstraeteBert/algos-ds | e0fe35bc3c5b7d8276c07250f56d3719ecc617de | d9215f11cdfa1a12a3b19ade3b95fa73848a636c | refs/heads/master | 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | def isISBN_13(code):
    if len(code) != 13:
        return False
    if code[:3] != "978" and code[:3] != "979":
        return False
    even = code[::2]
    oneven = code[1::2]
    even_som = 0
    oneven_som = 0
    for i in range(6):
        cijfer = int(even[i])
        even_som += cijfer
        cijfer = int(oneven[i])
        oneven_som += cijfer
    controle = (10 - (even_som + 3 * oneven_som) % 10) % 10
    return controle == int(even[6])


def overzicht(codes):
    types = ["Engelstalige landen", "Franstalige landen", "Duitstalige landen", "Japan", "Russischtalige landen",
             "China", "Overige landen", "Fouten"]
    lijst = {}
    for soort in types:
        lijst[soort] = 0
    for code in codes:
        if not isISBN_13(code):
            lijst["Fouten"] += 1
        else:
            nr = code[3]
            if nr == "0":
                nr = "1"
            elif nr in "689":
                nr = "7"
            elif nr == "7":
                nr = "6"
            soort = types[int(nr) - 1]
            lijst[soort] += 1
    for el in lijst:
        print("{}: {}".format(el, lijst[el]))
| [
"[email protected]"
] | |
16af628a8124aa21de4d8f7daff20c5fc1e87eea | 0951b7ad46683d5fd99ae5611e33117b70d5ba1b | /scg_venv/lib/python3.8/site-packages/pandas/tests/io/xml/test_to_xml.py | beaa6d61f02c24e9cac8829c36633f2c67a36630 | [] | no_license | alissonpmedeiros/scg | 035bf833e16e39f56502f2a65633e361c6dc4fa6 | e3e022a14058936619f1d79d11dbbb4f6f48d531 | refs/heads/main | 2023-04-19T05:29:55.828544 | 2022-10-28T08:38:27 | 2022-10-28T08:38:27 | 525,835,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,499 | py | from __future__ import annotations
from io import (
BytesIO,
StringIO,
)
import os
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
Index,
)
import pandas._testing as tm
import pandas.io.common as icom
from pandas.io.common import get_handle
from pandas.io.xml import read_xml
"""
CHECKLIST
[x] - ValueError: "Values for parser can only be lxml or etree."
etree
[x] - ImportError: "lxml not found, please install or use the etree parser."
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "To use stylesheet, you need lxml installed..."
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - PermissionError: "Forbidden"
lxml
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "stylesheet is not a url, file, or xml string."
[] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT)
[] - URLError: (USUALLY DUE TO NETWORKING)
[] - HTTPError: (NEED AN ONLINE STYLESHEET)
[X] - OSError: "failed to load external entity"
[X] - XMLSyntaxError: "Opening and ending tag mismatch"
[X] - XSLTApplyError: "Cannot resolve URI"
[X] - XSLTParseError: "failed to compile"
[X] - PermissionError: "Forbidden"
"""
geom_df = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4, np.nan, 3],
}
)
planet_df = DataFrame(
{
"planet": [
"Mercury",
"Venus",
"Earth",
"Mars",
"Jupiter",
"Saturn",
"Uranus",
"Neptune",
],
"type": [
"terrestrial",
"terrestrial",
"terrestrial",
"terrestrial",
"gas giant",
"gas giant",
"ice giant",
"ice giant",
],
"location": [
"inner",
"inner",
"inner",
"inner",
"outer",
"outer",
"outer",
"outer",
],
"mass": [
0.330114,
4.86747,
5.97237,
0.641712,
1898.187,
568.3174,
86.8127,
102.4126,
],
}
)
from_file_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<index>1</index>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<index>2</index>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
def equalize_decl(doc):
# etree and lxml differ on quotes and case in xml declaration
if doc is not None:
doc = doc.replace(
'<?xml version="1.0" encoding="utf-8"?',
"<?xml version='1.0' encoding='utf-8'?",
)
return doc
@pytest.fixture(params=["rb", "r"])
def mode(request):
return request.param
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
# FILE OUTPUT
def test_file_output_str_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == from_file_expected
def test_file_output_bytes_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == from_file_expected
def test_str_output(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
output = df_file.to_xml(parser=parser)
output = equalize_decl(output)
assert output == from_file_expected
def test_wrong_file_path(parser):
path = "/my/fake/path/output.xml"
with pytest.raises(
OSError,
match=(r"Cannot save file into a non-existent directory: .*path"),
):
geom_df.to_xml(path, parser=parser)
# INDEX
def test_index_false(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == expected
def test_index_false_rename_row_root(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<books>
<book>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</book>
<book>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</book>
</books>"""
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(
path, index=False, root_name="books", row_name="book", parser=parser
)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == expected
@pytest.mark.parametrize(
"offset_index", [list(range(10, 13)), [str(i) for i in range(10, 13)]]
)
def test_index_false_with_offset_input_index(parser, offset_index):
"""
Tests that the output does not contain the `<index>` field when the index of the
input Dataframe has an offset.
This is a regression test for issue #42458.
"""
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
offset_geom_df = geom_df.copy()
offset_geom_df.index = Index(offset_index)
output = offset_geom_df.to_xml(index=False, parser=parser)
output = equalize_decl(output)
assert output == expected
# NA_REP
na_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
def test_na_elem_output(datapath, parser):
output = geom_df.to_xml(parser=parser)
output = equalize_decl(output)
assert output == na_expected
def test_na_empty_str_elem_option(datapath, parser):
output = geom_df.to_xml(na_rep="", parser=parser)
output = equalize_decl(output)
assert output == na_expected
def test_na_empty_elem_option(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides>0.0</sides>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(na_rep="0.0", parser=parser)
output = equalize_decl(output)
assert output == expected
# ATTR_COLS
def test_attrs_cols_nan_output(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
<row index="1" shape="circle" degrees="360"/>
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>"""
output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser)
output = equalize_decl(output)
assert output == expected
def test_attrs_cols_prefix(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.xom">
<doc:row doc:index="0" doc:shape="square" \
doc:degrees="360" doc:sides="4.0"/>
<doc:row doc:index="1" doc:shape="circle" \
doc:degrees="360"/>
<doc:row doc:index="2" doc:shape="triangle" \
doc:degrees="180" doc:sides="3.0"/>
</doc:data>"""
output = geom_df.to_xml(
attr_cols=["index", "shape", "degrees", "sides"],
namespaces={"doc": "http://example.xom"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
assert output == expected
def test_attrs_unknown_column(parser):
with pytest.raises(KeyError, match=("no valid column")):
geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser)
def test_attrs_wrong_type(parser):
with pytest.raises(TypeError, match=("is not a valid type for attr_cols")):
geom_df.to_xml(attr_cols='"shape", "degree", "sides"', parser=parser)
# ELEM_COLS
def test_elems_cols_nan_output(datapath, parser):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<degrees>360</degrees>
<sides>4.0</sides>
<shape>square</shape>
</row>
<row>
<degrees>360</degrees>
<sides/>
<shape>circle</shape>
</row>
<row>
<degrees>180</degrees>
<sides>3.0</sides>
<shape>triangle</shape>
</row>
</data>"""
output = geom_df.to_xml(
index=False, elem_cols=["degrees", "sides", "shape"], parser=parser
)
output = equalize_decl(output)
assert output == elems_cols_expected
def test_elems_unknown_column(parser):
with pytest.raises(KeyError, match=("no valid column")):
geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser)
def test_elems_wrong_type(parser):
with pytest.raises(TypeError, match=("is not a valid type for elem_cols")):
geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser)
def test_elems_and_attrs_cols(datapath, parser):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row shape="square">
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row shape="circle">
<degrees>360</degrees>
<sides/>
</row>
<row shape="triangle">
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(
index=False,
elem_cols=["degrees", "sides"],
attr_cols=["shape"],
parser=parser,
)
output = equalize_decl(output)
assert output == elems_cols_expected
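# Editorial note (sketch, not an original test): attr_cols renders the listed
# columns as attributes on each <row>, elem_cols renders them as child
# elements, and the two can be combined as in the test above, e.g.
#
#   geom_df.to_xml(index=False, attr_cols=["shape"], elem_cols=["degrees", "sides"])
#
# yields rows of the form
#   <row shape="square"><degrees>360</degrees><sides>4.0</sides></row>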
# HIERARCHICAL COLUMNS
def test_hierarchical_columns(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count_mass>4</count_mass>
<sum_mass>11.81</sum_mass>
<mean_mass>2.95</mean_mass>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count_mass>2</count_mass>
<sum_mass>2466.5</sum_mass>
<mean_mass>1233.25</mean_mass>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count_mass>2</count_mass>
<sum_mass>189.23</sum_mass>
<mean_mass>94.61</mean_mass>
</row>
<row>
<location>All</location>
<type/>
<count_mass>8</count_mass>
<sum_mass>2667.54</sum_mass>
<mean_mass>333.44</mean_mass>
</row>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
def test_hierarchical_attrs_columns(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count_mass="4" \
sum_mass="11.81" mean_mass="2.95"/>
<row location="outer" type="gas giant" count_mass="2" \
sum_mass="2466.5" mean_mass="1233.25"/>
<row location="outer" type="ice giant" count_mass="2" \
sum_mass="189.23" mean_mass="94.61"/>
<row location="All" type="" count_mass="8" \
sum_mass="2667.54" mean_mass="333.44"/>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# MULTIINDEX
def test_multi_index(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count>4</count>
<sum>11.81</sum>
<mean>2.95</mean>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count>2</count>
<sum>2466.5</sum>
<mean>1233.25</mean>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count>2</count>
<sum>189.23</sum>
<mean>94.61</mean>
</row>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
def test_multi_index_attrs_cols(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count="4" \
sum="11.81" mean="2.95"/>
<row location="outer" type="gas giant" count="2" \
sum="2466.5" mean="1233.25"/>
<row location="outer" type="ice giant" count="2" \
sum="189.23" mean="94.61"/>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# NAMESPACE
def test_default_namespace(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser)
output = equalize_decl(output)
assert output == expected
# PREFIX
def test_namespace_prefix(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser
)
output = equalize_decl(output)
assert output == expected
def test_missing_prefix_in_nmsp(parser):
with pytest.raises(KeyError, match=("doc is not included in namespaces")):
geom_df.to_xml(
namespaces={"": "http://example.com"}, prefix="doc", parser=parser
)
def test_namespace_prefix_and_default(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns="http://example.com" xmlns:doc="http://other.org">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"": "http://example.com", "doc": "http://other.org"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
if output is not None:
# etree and lxml differs on order of namespace prefixes
output = output.replace(
'xmlns:doc="http://other.org" xmlns="http://example.com"',
'xmlns="http://example.com" xmlns:doc="http://other.org"',
)
assert output == expected
# ENCODING
encoding_expected = """\
<?xml version='1.0' encoding='ISO-8859-1'?>
<data>
<row>
<index>0</index>
<rank>1</rank>
<malename>José</malename>
<femalename>Sofía</femalename>
</row>
<row>
<index>1</index>
<rank>2</rank>
<malename>Luis</malename>
<femalename>Valentina</femalename>
</row>
<row>
<index>2</index>
<rank>3</rank>
<malename>Carlos</malename>
<femalename>Isabella</femalename>
</row>
<row>
<index>3</index>
<rank>4</rank>
<malename>Juan</malename>
<femalename>Camila</femalename>
</row>
<row>
<index>4</index>
<rank>5</rank>
<malename>Jorge</malename>
<femalename>Valeria</femalename>
</row>
</data>"""
def test_encoding_option_str(datapath, parser):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, parser=parser, encoding="ISO-8859-1").head(5)
output = df_file.to_xml(encoding="ISO-8859-1", parser=parser)
if output is not None:
# etree and lxml differ on quotes and case in xml declaration
output = output.replace(
'<?xml version="1.0" encoding="ISO-8859-1"?',
"<?xml version='1.0' encoding='ISO-8859-1'?",
)
assert output == encoding_expected
@td.skip_if_no("lxml")
def test_correct_encoding_file(datapath):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, encoding="ISO-8859-1", parser="lxml")
@td.skip_if_no("lxml")
@pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
def test_wrong_encoding_option_lxml(datapath, parser, encoding):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, encoding=encoding, parser=parser)
def test_misspelled_encoding(parser):
with pytest.raises(LookupError, match=("unknown encoding")):
geom_df.to_xml(encoding="uft-8", parser=parser)
# PRETTY PRINT
@td.skip_if_no("lxml")
def test_xml_declaration_pretty_print():
expected = """\
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(xml_declaration=False)
assert output == expected
def test_no_pretty_print_with_decl(parser):
expected = (
"<?xml version='1.0' encoding='utf-8'?>\n"
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(pretty_print=False, parser=parser)
output = equalize_decl(output)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
def test_no_pretty_print_no_decl(parser):
expected = (
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
# PARSER
@td.skip_if_installed("lxml")
def test_default_parser_no_lxml():
with pytest.raises(
ImportError, match=("lxml not found, please install or use the etree parser.")
):
geom_df.to_xml()
def test_unknown_parser():
with pytest.raises(
ValueError, match=("Values for parser can only be lxml or etree.")
):
geom_df.to_xml(parser="bs4")
# STYLESHEET
xsl_expected = """\
<?xml version="1.0" encoding="utf-8"?>
<data>
<row>
<field field="index">0</field>
<field field="shape">square</field>
<field field="degrees">360</field>
<field field="sides">4.0</field>
</row>
<row>
<field field="index">1</field>
<field field="shape">circle</field>
<field field="degrees">360</field>
<field field="sides"/>
</row>
<row>
<field field="index">2</field>
<field field="shape">triangle</field>
<field field="degrees">180</field>
<field field="sides">3.0</field>
</row>
</data>"""
@td.skip_if_no("lxml")
def test_stylesheet_file_like(datapath, mode):
xsl = datapath("io", "data", "xml", "row_field_output.xsl")
with open(xsl, mode) as f:
assert geom_df.to_xml(stylesheet=f) == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_io(datapath, mode):
xsl_path = datapath("io", "data", "xml", "row_field_output.xsl")
xsl_obj: BytesIO | StringIO
with open(xsl_path, mode) as f:
if mode == "rb":
xsl_obj = BytesIO(f.read())
else:
xsl_obj = StringIO(f.read())
output = geom_df.to_xml(stylesheet=xsl_obj)
assert output == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_buffered_reader(datapath, mode):
xsl = datapath("io", "data", "xml", "row_field_output.xsl")
with open(xsl, mode) as f:
xsl_obj = f.read()
output = geom_df.to_xml(stylesheet=xsl_obj)
assert output == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_wrong_path(datapath):
from lxml.etree import XMLSyntaxError
xsl = os.path.join("data", "xml", "row_field_output.xslt")
with pytest.raises(
XMLSyntaxError,
match=("Start tag expected, '<' not found"),
):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_stylesheet(val):
from lxml.etree import XMLSyntaxError
with pytest.raises(
XMLSyntaxError, match=("Document is empty|Start tag expected, '<' not found")
):
geom_df.to_xml(stylesheet=val)
@td.skip_if_no("lxml")
def test_incorrect_xsl_syntax():
from lxml.etree import XMLSyntaxError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" >
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XMLSyntaxError, match=("Opening and ending tag mismatch")):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_eval():
from lxml.etree import XSLTParseError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XSLTParseError, match=("failed to compile")):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_apply(parser):
from lxml.etree import XSLTApplyError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:copy-of select="document('non_existent.xml')/*"/>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")):
with tm.ensure_clean("test.xml") as path:
geom_df.to_xml(path, stylesheet=xsl)
def test_stylesheet_with_etree(datapath):
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>"""
with pytest.raises(
ValueError, match=("To use stylesheet, you need lxml installed")
):
geom_df.to_xml(parser="etree", stylesheet=xsl)
@td.skip_if_no("lxml")
def test_style_to_csv():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim">,</xsl:param>
<xsl:template match="/data">
<xsl:text>,shape,degrees,sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, $delim, shape, $delim,
degrees, $delim, sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_csv = geom_df.to_csv(line_terminator="\n")
if out_csv is not None:
out_csv = out_csv.strip()
out_xml = geom_df.to_xml(stylesheet=xsl)
assert out_csv == out_xml
@td.skip_if_no("lxml")
def test_style_to_string():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim"><xsl:text> </xsl:text></xsl:param>
<xsl:template match="/data">
<xsl:text> shape degrees sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, ' ',
substring($delim, 1, string-length('triangle')
- string-length(shape) + 1),
shape,
substring($delim, 1, string-length(name(degrees))
- string-length(degrees) + 2),
degrees,
substring($delim, 1, string-length(name(sides))
- string-length(sides) + 2),
sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_str = geom_df.to_string()
out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl)
assert out_xml == out_str
@td.skip_if_no("lxml")
def test_style_to_json():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="quot">"</xsl:param>
<xsl:template match="/data">
<xsl:text>{"shape":{</xsl:text>
<xsl:apply-templates select="descendant::row/shape"/>
<xsl:text>},"degrees":{</xsl:text>
<xsl:apply-templates select="descendant::row/degrees"/>
<xsl:text>},"sides":{</xsl:text>
<xsl:apply-templates select="descendant::row/sides"/>
<xsl:text>}}</xsl:text>
</xsl:template>
<xsl:template match="shape|degrees|sides">
<xsl:variable name="val">
<xsl:if test = ".=''">
<xsl:value-of select="'null'"/>
</xsl:if>
<xsl:if test = "number(text()) = text()">
<xsl:value-of select="text()"/>
</xsl:if>
<xsl:if test = "number(text()) != text()">
<xsl:value-of select="concat($quot, text(), $quot)"/>
</xsl:if>
</xsl:variable>
<xsl:value-of select="concat($quot, preceding-sibling::index,
$quot,':', $val)"/>
<xsl:if test="preceding-sibling::index != //row[last()]/index">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:template>
</xsl:stylesheet>"""
out_json = geom_df.to_json()
out_xml = geom_df.to_xml(stylesheet=xsl)
assert out_json == out_xml
# COMPRESSION
geom_xml = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
def test_compression_output(parser, compression_only):
with tm.ensure_clean() as path:
geom_df.to_xml(path, parser=parser, compression=compression_only)
with get_handle(
path,
"r",
compression=compression_only,
) as handle_obj:
output = handle_obj.handle.read()
output = equalize_decl(output)
assert geom_xml == output.strip()
def test_filename_and_suffix_comp(parser, compression_only):
compfile = "xml." + icom._compression_to_extension[compression_only]
with tm.ensure_clean(filename=compfile) as path:
geom_df.to_xml(path, parser=parser, compression=compression_only)
with get_handle(
path,
"r",
compression=compression_only,
) as handle_obj:
output = handle_obj.handle.read()
output = equalize_decl(output)
assert geom_xml == output.strip()
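# Editorial note (assumption about the fixture, not an original test):
# compression_only is taken to parametrize over the compression codecs pandas
# supports, so the two tests above exercise the same round trip, writing with
# DataFrame.to_xml(path, compression=...) and reopening through get_handle with
# the matching codec, once for a bare path and once for a path whose suffix
# already names the codec.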
def test_ea_dtypes(any_numeric_ea_dtype, parser):
# GH#43903
expected = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<a/>
</row>
</data>"""
df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype)
result = df.to_xml(parser=parser)
assert equalize_decl(result).strip() == expected
def test_unsupported_compression(datapath, parser):
with pytest.raises(ValueError, match="Unrecognized compression type"):
with tm.ensure_clean() as path:
geom_df.to_xml(path, parser=parser, compression="7z")
# STORAGE OPTIONS
@td.skip_if_no("s3fs")
@td.skip_if_no("lxml")
def test_s3_permission_output(parser, s3_resource):
# s3_resource hosts pandas-test
import s3fs
with pytest.raises(PermissionError, match="Access Denied"):
fs = s3fs.S3FileSystem(anon=True)
fs.ls("pandas-test")
geom_df.to_xml("s3://pandas-test/geom.xml", compression="zip", parser=parser)
| [
"[email protected]"
] | |
68166f1c54bc0727d4ea84555b656e8b4fc72753 | a5200ba8b1d2b248c7c7bef5704c7e375efc1c2a | /exp_configs.py | c09ecd223c6f468e105e0d7348bd4b1cfa3bf410 | [] | no_license | hongyunnchen/sps | e0c958dadca2a60b0e8d797d8e786f88669cf5c7 | 4ddb3567f9a1893685ea161e2b1d7ba3cb3a1fe3 | refs/heads/master | 2023-02-26T06:36:41.462069 | 2021-02-09T12:22:09 | 2021-02-09T12:22:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,448 | py | from haven import haven_utils as hu
import itertools
# datasets
kernel_datasets = ["mushrooms",
# "w8a", "ijcnn",
# "rcv1"
]
# define runs
run_list = [0]
# define optimizers
c_list = [0.2]
sps_list = []
for c, adapt_flag in itertools.product(c_list, ['smooth_iter']):
sps_list += [{'name':"sps", "c":c, 'adapt_flag':adapt_flag}]
opt_list = sps_list + [{'name': 'adam'}]
EXP_GROUPS = {}
# define interpolation exp groups
EXP_GROUPS['kernel'] = hu.cartesian_exp_group({"dataset":kernel_datasets,
"model":["linear"],
"loss_func": ['logistic_loss'],
"acc_func": ["logistic_accuracy"],
"opt": opt_list ,
"batch_size":[100],
"max_epoch":[35],
"runs":run_list})
EXP_GROUPS['mf'] = hu.cartesian_exp_group({"dataset":["matrix_fac"],
"model":["matrix_fac_1", "matrix_fac_4", "matrix_fac_10", "linear_fac"],
"loss_func": ["squared_loss"],
"opt": opt_list,
"acc_func":["mse"],
"batch_size":[100],
"max_epoch":[50],
"runs":run_list})
EXP_GROUPS['mnist'] = hu.cartesian_exp_group({"dataset":["mnist"],
"model":["mlp"],
"loss_func": ["softmax_loss"],
"opt":[{'name':"sps", "c":c,
'adapt_flag':'smooth_iter',
'centralize_grad':True}] + opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list})
EXP_GROUPS['deep'] = (hu.cartesian_exp_group({"dataset":["cifar10"],
"model":["resnet34", "densenet121"],
"loss_func": ["softmax_loss"],
"opt": opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list}) +
hu.cartesian_exp_group({"dataset":["cifar100"],
"model":["resnet34_100", "densenet121_100"],
"loss_func": ["softmax_loss"],
"opt": opt_list,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":run_list})
)
EXP_GROUPS['cifar'] = hu.cartesian_exp_group({"dataset":["cifar10"],
"model":["resnet34"],
"loss_func": ["softmax_loss"],
"opt": opt_list + [{'name':"sps", "c":c,
'adapt_flag':'smooth_iter',
'centralize_grad':True}] ,
"acc_func":["softmax_accuracy"],
"batch_size":[128],
"max_epoch":[200],
"runs":[0]})
# define non-interpolation exp groups
eta_max_list = [1, 5, 100]
c_list = [0.5]
sps_l2_list = []
for c, eta_max in itertools.product(c_list, eta_max_list):
sps_l2_list += [{'name':"sps", "c":c,
'fstar_flag':True, 'eps':0,
'adapt_flag':'constant',
'eta_max':eta_max}]
sps_list = []
for c, eta_max in itertools.product(c_list, eta_max_list):
sps_list += [{'name':"sps", "c":c,
'fstar_flag':False, 'eps':0,
'adapt_flag':'constant',
'eta_max':eta_max}]
sgd_list = [{'name':"sgd",
"lr":10.0},{'name':"sgd",
"lr":1.0}, {'name':"sgd",
"lr":1e-3}, {'name':"sgd",
"lr":1e-1}, {'name':"sgd",
"lr":1e-2}]
EXP_GROUPS['syn_l2'] = (hu.cartesian_exp_group({"dataset":['syn'],
"model":["logistic"],
"loss_func": [
'logistic_l2_loss',
],
"acc_func": ["logistic_accuracy"],
"opt": sps_l2_list + sgd_list,
"batch_size":[1],
"max_epoch":[50],
"runs":run_list}))
EXP_GROUPS['syn'] = (hu.cartesian_exp_group({"dataset":['syn'],
"model":["logistic"],
"loss_func": [
'logistic_loss',
],
"acc_func": ["logistic_accuracy"],
"opt": sps_list + sgd_list,
"batch_size":[1],
"max_epoch":[50],
"runs":run_list}))
| [
"[email protected]"
] | |
5fbdd4faeaa02752c91f94d6860761a2dfb07bac | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/datashare/v20181101preview/get_adls_gen2_file_system_data_set.py | 6a6d525b551e637a119b660529a00a01bd625ef0 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,350 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetADLSGen2FileSystemDataSetResult',
'AwaitableGetADLSGen2FileSystemDataSetResult',
'get_adls_gen2_file_system_data_set',
'get_adls_gen2_file_system_data_set_output',
]
@pulumi.output_type
class GetADLSGen2FileSystemDataSetResult:
"""
An ADLS Gen 2 file system data set.
"""
def __init__(__self__, data_set_id=None, file_system=None, id=None, kind=None, name=None, resource_group=None, storage_account_name=None, subscription_id=None, type=None):
if data_set_id and not isinstance(data_set_id, str):
raise TypeError("Expected argument 'data_set_id' to be a str")
pulumi.set(__self__, "data_set_id", data_set_id)
if file_system and not isinstance(file_system, str):
raise TypeError("Expected argument 'file_system' to be a str")
pulumi.set(__self__, "file_system", file_system)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if storage_account_name and not isinstance(storage_account_name, str):
raise TypeError("Expected argument 'storage_account_name' to be a str")
pulumi.set(__self__, "storage_account_name", storage_account_name)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> str:
"""
Unique id for identifying a data set resource
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="fileSystem")
def file_system(self) -> str:
"""
The file system name.
"""
return pulumi.get(self, "file_system")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id of the azure resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set.
Expected value is 'AdlsGen2FileSystem'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group of storage account
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
Storage account name of the source data set
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Subscription id of storage account
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetADLSGen2FileSystemDataSetResult(GetADLSGen2FileSystemDataSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetADLSGen2FileSystemDataSetResult(
data_set_id=self.data_set_id,
file_system=self.file_system,
id=self.id,
kind=self.kind,
name=self.name,
resource_group=self.resource_group,
storage_account_name=self.storage_account_name,
subscription_id=self.subscription_id,
type=self.type)
def get_adls_gen2_file_system_data_set(account_name: Optional[str] = None,
data_set_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetADLSGen2FileSystemDataSetResult:
"""
An ADLS Gen 2 file system data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetName'] = data_set_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20181101preview:getADLSGen2FileSystemDataSet', __args__, opts=opts, typ=GetADLSGen2FileSystemDataSetResult).value
return AwaitableGetADLSGen2FileSystemDataSetResult(
data_set_id=__ret__.data_set_id,
file_system=__ret__.file_system,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
resource_group=__ret__.resource_group,
storage_account_name=__ret__.storage_account_name,
subscription_id=__ret__.subscription_id,
type=__ret__.type)
@_utilities.lift_output_func(get_adls_gen2_file_system_data_set)
def get_adls_gen2_file_system_data_set_output(account_name: Optional[pulumi.Input[str]] = None,
data_set_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetADLSGen2FileSystemDataSetResult]:
"""
An ADLS Gen 2 file system data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
...
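# Example usage (editorial sketch; the resource names below are placeholders,
# not values from this SDK):
#
#   result = get_adls_gen2_file_system_data_set(
#       account_name="my-share-account",
#       data_set_name="my-data-set",
#       resource_group_name="my-resource-group",
#       share_name="my-share")
#   pulumi.export("fileSystem", result.file_system)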
| [
"[email protected]"
] | |
1f9ca65ce07629f7f3f5c41490cfa08c638c7723 | 6ac3e509c9d848497a7cb0f79008ec1f395f3aad | /Phone-Numbers/freecarrierlookup/freecarrierlookup/__main__.py | e28a3cacc22b778073bda4d6b71388e5f2893fbf | [] | no_license | WeilerWebServices/Scrapers | a87ca6c0fd719639be831623b2b55183932d8fba | 206ea9adf48e9b882a2d62df691185609483f9d0 | refs/heads/master | 2022-11-30T10:46:09.731660 | 2020-08-04T16:07:19 | 2020-08-04T16:07:19 | 273,375,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,091 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import time
import csv
from sys import stderr, stdout
try:
import phonenumbers
except ImportError:
phonenumbers = None
from . import FreeCarrierLookup
########################################
# Parse arguments
p = argparse.ArgumentParser(description='Lookup carrier information using FreeCarrierLookup.com')
if phonenumbers:
p.add_argument('phone_number', nargs='+', type=str.strip, help='Phone number to lookup')
p.add_argument('--region', default='US', help='libphonenumbers dialing region (default %(default)r)')
x = p.add_mutually_exclusive_group()
x.add_argument('--cc', type=str.strip,
help='Default country code (if none, all numbers must be in E.164 format)')
x.add_argument('-E', '--assume-e164', action='store_true',
help="Assume E.164 format even if leading '+' not present")
else:
p.description += '''; phonenumbers module not available (https://github.com/daviddrysdale/python-phonenumbers), so country code must be explicitly specified.'''
p.add_argument('phone_number', nargs='+', type=str.strip,
help='Phone number to lookup (without country code)')
p.add_argument('--cc', type=str.strip, required=True,
help='Country code for all numbers')
p.add_argument('-o','--output', type=argparse.FileType('w'), default=stdout, help='Output file (default is stdout)')
p.add_argument('-c','--csv', action='store_true', help='Output results in CSV format')
p.add_argument('-u', '--user-agent', help="User-Agent string (default is none)")
p.add_argument('-r', '--rate-limit', type=int, help="Rate limit in seconds per query (default is none)")
p.add_argument('--proxy', help='HTTPS proxy (in any format accepted by python-requests, e.g. socks5://localhost:8080)')
args = p.parse_args()
fcl = FreeCarrierLookup(args.user_agent)
csvwr = None
if args.proxy:
fcl.session.proxies['https'] = args.proxy
# Lookup phone numbers' carriers
rate_allow = None
for pn in args.phone_number:
if phonenumbers:
# parse into country code and "national number" with phonenumbers
if not pn.startswith('+'):
if args.cc: pn = '+%s %s' % (args.cc, pn)
elif args.assume_e164: pn = '+' + pn
try:
obj = phonenumbers.parse(pn, region=args.region)
cc, phonenum = obj.country_code, ('0'*(obj.number_of_leading_zeros or obj.italian_leading_zero or 0)) + str(obj.national_number)
except phonenumbers.NumberParseException as e:
print("WARNING: Could not parse %r with phonenumbers: %s" % (pn, ' '.join(e.args)), file=stderr)
continue
else:
# use country code and phone number as-is
if pn.startswith('+'):
print("WARNING: Skipping %r, which has an E.164 country code prefix (can't parse without phonenumbers module)" % pn, file=stderr)
continue
cc, phonenum = args.cc, ''.join(filter(str.isdigit, pn))
# Request (web interface includes test=456 and sessionlogin=0, but they don't seem to be required)
if args.rate_limit:
now = time.time()
if rate_allow and now < rate_allow: time.sleep(rate_allow - now)
rate_allow = time.time() + args.rate_limit
retry = True
while retry:
retry = False
try:
im, prompt = fcl.get_captcha()
captcha = None
if prompt:
print("CAPTCHA prompt: %s" % prompt, file=stderr)
captcha = input("CAPTCHA response (leave blank to show image)? ")
else:
print("Couldn't parse CAPTCHA prompt, showing image", file=stderr)
if not captcha:
im.show()
captcha = input("CAPTCHA response? ")
results = fcl.lookup(cc, phonenum, captcha)
except RuntimeError as e:
status, strings = e.args
if status == 'error' and 'quota' in strings[0].lower():
p.error('exceeded quota')
elif status == 'error' and 'captcha' in strings[0].lower():
print('Incorrect CAPTCHA response. Retry with new CAPTCHA', file=stderr)
retry = True
else:
print('%s received for +%s %s: %s' % (status.title(), cc, phonenum, ' '.join(strings)), file=stderr)
except Exception as e:
p.error('\n'.join(map(str, e.args)))
else:
if args.csv:
if csvwr is None:
csvwr = csv.writer(args.output)
csvwr.writerow(('Country Code', 'Phone Number', 'Carrier', 'Is Wireless', 'SMS Gateway Address', 'MMS Gateway Address', 'Note', 'Extra'))
csvwr.writerow((cc, phonenum, results.pop('Carrier', None), results.pop('Is Wireless', None), results.pop('SMS Gateway Address',None), results.pop('MMS Gateway Address',None), results.pop('Note',None), results or None))
else:
print('+%s %s: %s' % (cc, phonenum, results), file=args.output)
p.exit()
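# Example invocations (editorial sketch; the flags are the ones defined above,
# the phone numbers are made up):
#
#   python -m freecarrierlookup --cc 1 2025550123
#   python -m freecarrierlookup --csv -o carriers.csv +12025550123
#
# With the phonenumbers module available, numbers may be given in E.164 form
# (or parsed via --region / --assume-e164); without it, --cc is required.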
| [
"[email protected]"
] | |
680ed39c6067bac7d1093e8713b57f5a460cce64 | a8079efec61894fb6082986e66c4c146757fc895 | /src/constraints/operations/Set.py | bb54ca702dcb33a9ceb60f2d7578608f0f5e5c14 | [] | no_license | gsdlab/ClaferSMT | aaa5bd0c0c72f6a9b156529a871cced40e006cba | d8240b4503107641d62f7f913ebe50a88182d9a3 | refs/heads/master | 2021-01-16T21:23:22.838308 | 2015-08-20T00:24:54 | 2015-08-20T00:24:54 | 9,037,961 | 2 | 1 | null | 2018-08-21T13:48:02 | 2013-03-26T19:00:12 | TeX | UTF-8 | Python | false | false | 19,280 | py | '''
Created on Nov 1, 2013
@author: ezulkosk
'''
from common import Common, SMTLib
from common.Common import mAnd, mOr
from structures.ExprArg import ExprArg, IntArg, BoolArg, JoinArg
import sys
def getClaferMatch(key, my_list):
'''
TODO REMOVE
Returns the entries in my_list that correspond to either sub or super sorts of key,
specifically a list of tuples [(bool, int, (sort, Mask))],
where bool is True iff the key is the subsort,
int is the index of the subsort in the supersort (0 if the same sort),
(sort,Mask) are the actual entries from my_list.
'''
matches = []
for i in my_list:
(sort, _) = i
if key == sort:
matches.append((True,0,i))
else:
totalIndexInSuper = 0
tempKey = key
while tempKey.superSort:
totalIndexInSuper = totalIndexInSuper + tempKey.indexInSuper
tempKey = tempKey.superSort
if tempKey == sort:
matches.append((True, totalIndexInSuper, i))
break
totalIndexInSuper = 0
tempKey = sort
while tempKey.superSort:
totalIndexInSuper = totalIndexInSuper + tempKey.indexInSuper
tempKey = tempKey.superSort
if tempKey == key:
matches.append((False, totalIndexInSuper, i))
break
return matches
def find(key, l):
#TODO REMOVE
for i in l:
(sort, mask) = i
if sort == key:
return mask
def addMatchValues(matches, instances, left=True):
'''
Ignores PrimitiveSorts
'''
for (sort, index) in instances.keys():
(expr,polarity) = instances[(sort,index)]
#!!!
default = (SMTLib.SMT_BoolConst(False), Common.DEFINITELY_OFF)
(prev_left, prev_right) = matches.get((sort,index), (default,default))
if left:
(prev_expr, prev_pol) = prev_left
new_left = (mOr(expr, prev_expr), Common.aggregate_polarity(polarity, prev_pol))
new_right = prev_right
else:
(prev_expr, prev_pol) = prev_right
new_left = prev_left
new_right = (mOr(expr, prev_expr), Common.aggregate_polarity(polarity, prev_pol))
matches[(sort,index)] = (new_left,new_right)
return matches
def getSetInstancePairs(left,right=None):
#key -- (sort, index), where sort must be a highest sort
#value -- ([isOnExpr], [isOnExpr]), where the left and right come from leftInstanceSort or rightInstanceSort, respectively
matches = {}
matches = addMatchValues(matches, left.getInstances(), left=True)
if right:
matches = addMatchValues(matches, right.getInstances(), left=False)
return matches
def compute_int_set(instances):
cons = []
for index in range(len(instances)):
(i,c) = instances[index]
cons.append(mAnd(c, *[mOr(SMTLib.createNot(jc), SMTLib.SMT_NE(j,i)) for (j, jc) in instances[0:index]]))
return cons
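# Editorial note: for the int instance at position `index`, compute_int_set
# builds the condition "this instance is on and its value differs from every
# earlier instance that is also on", so the resulting conditions can be summed
# to count the distinct integer values in a set exactly once (see op_card).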
def op_eq(left,right, cacheJoins=False, bc = None):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~BoolArg`
Ensures that the left = right.
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
if cacheJoins and bc:
#TODO CLEAN
left_key = None
right_key = None
keys = []
#asil allocation speedup, if both sides are sets, we can perform expression substitution in other constraints
#bc is the bracketed constraint to put the cache
for i in [left,right]:
if isinstance(i, JoinArg):
newkeys = Common.computeCacheKeys(i.flattenJoin())
#print(tuple(key))
keys = keys + newkeys
#need to return all keys during the progress of join, add flag?
#get the all keys
all_keys = i.checkIfJoinIsComputed(nonsupered=True, getAllKeys = True)
#print(keys)
#print(all_keys)
keys = keys+all_keys
#sys.exit()
#print()
#print("GGGG right" + str(right.__class__))
#print(right.clafers)
if len(left.clafers) != len(right.clafers):
minJoinVal = left.clafers if len(left.clafers) < len(right.clafers) else right.clafers
for i in keys:
#TODO make more robust (e.g. if multiple equalities exist for the same join key, aggregate expressions
bc.cache[i] = ExprArg(minJoinVal)
#print(i)
#print(minJoinVal)
#print(str(len(minJoinVal)) + " " + str(len(left.clafers)) + " " + str(len(right.clafers)))
#print(str(len(left.clafers)) + " " + str(len(right.clafers)))
cond = []
#int equality case
lints = [(e,c) for (e,c) in left.getInts() if str(c) != "False"]
rints = [(e,c) for (e,c) in right.getInts() if str(c) != "False"]
if lints or rints:
for (e,c) in lints:
#exists r in R s.t. e == r
expr = mOr(*[mAnd(rc, SMTLib.SMT_EQ(e,r)) for (r,rc) in rints])
if str(c) != "True":
expr = SMTLib.SMT_Implies(c, expr)
cond.append(expr)
for (e,c) in rints:
#exists l in L s.t. e == l
expr = mOr(*[mAnd(lc, SMTLib.SMT_EQ(e,l)) for (l,lc) in lints])
if str(c) != "True":
expr = SMTLib.SMT_Implies(c, expr)
cond.append(expr)
#clafer-set equality case
matches = getSetInstancePairs(left,right)
for ((lexpr, lpol),(rexpr, rpol)) in matches.values():
if lpol == Common.DEFINITELY_OFF and rpol == Common.DEFINITELY_OFF:
continue
elif lpol == Common.DEFINITELY_OFF:
cond.append(SMTLib.createNot(rexpr))
elif rpol == Common.DEFINITELY_OFF:
cond.append(SMTLib.createNot(lexpr))
else:
cond.append(SMTLib.SMT_Implies(lexpr, rexpr))
cond.append(SMTLib.SMT_Implies(rexpr, lexpr))
return BoolArg(mAnd(*cond))
def op_ne(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~BoolArg`
Ensures that the left != right.
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
expr = op_eq(left, right)
b = expr.getBool()
return BoolArg(SMTLib.createNot(b))
def op_implies(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~BoolArg`
Ensure that if instance *i* of left is on, so is instance *i* of right.
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
#clafer-set equality case
if left.getInts():
sys.exit("FIXME Implies")
if isinstance(left, BoolArg) and isinstance(right, BoolArg):
return BoolArg(SMTLib.SMT_Implies(left.getBool(), right.getBool()))
cond = []
matches = getSetInstancePairs(left,right)
for ((lexpr, lpol),(rexpr, rpol)) in matches.values():
if lpol == Common.DEFINITELY_OFF or rpol == Common.DEFINITELY_ON:
continue
elif lpol == Common.DEFINITELY_ON:
cond.append(rexpr)
else:
#lpol is unknown and rpol is off or unknown
#almost the same as op_difference below
cond.append(SMTLib.SMT_Implies(lexpr, rexpr))
return BoolArg(mAnd(*cond))
'''
#######################################################################
# END RELATIONAL/BOOLEAN OPERATORS
#######################################################################
'''
'''
#######################################################################
# SET OPERATORS
#######################################################################
'''
def getNextInstanceSort(left, right):
if left or right:
if left and right:
if left[0][0] < right[0][0]:
return [("l", left.pop(0))]
elif left[0][0] > right[0][0]:
return [("r", right.pop(0))]
else:
return [("l", left.pop(0)), ("r", right.pop(0))]
elif left:
return [("l", left.pop(0))]
else:
return [("r", right.pop(0))]
else:
return []
def op_card(arg):
'''
:param arg:
    :type arg: :class:`~ExprArg`
:returns: :class:`~IntArg`
Returns the number of instances that are *on* in arg.
'''
assert isinstance(arg, ExprArg)
instances = []
matches = getSetInstancePairs(arg)
known_card = 0
if arg.getInts():
card_cons = compute_int_set(arg.getInts())
for i in card_cons:
if isinstance(i, SMTLib.SMT_BoolConst):
if i.value:
known_card = known_card + 1
else:
instances.append(SMTLib.SMT_If(i, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
for (instance,_) in matches.values():
(expr, polarity) = instance
if polarity == Common.DEFINITELY_ON:
known_card = known_card + 1
else:
instances.append(SMTLib.SMT_If(expr, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
instances.append(SMTLib.SMT_IntConst(known_card))
return IntArg(SMTLib.createSum(instances))
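# Editorial sketch of the counting idiom above: instances already known to be
# on contribute to the constant known_card, every other instance contributes a
# conditional term, and the cardinality is their sum, roughly
#
#   SMTLib.createSum([SMTLib.SMT_If(expr, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)),
#                     ...,
#                     SMTLib.SMT_IntConst(known_card)])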
def int_set_union(leftIntSort, rightIntSort):
sys.exit("TODO")
'''
(_,(left_sort, left_mask)) = leftIntSort
(_,(right_sort, right_mask)) = rightIntSort
newMask = Mask()
sort = IntSort()
for i in left_mask.keys():
cardMask_constraint = SMTLib.SMT_EQ(left_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(1))
if newMask.size() != 0:
noPrevious_constraint = SMTLib.SMT_And(*[SMTLib.SMT_Or(SMTLib.SMT_EQ(sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(0)),
SMTLib.SMT_NE(newMask.get(j), left_mask.get(i))) for j in newMask.keys()])
else:
noPrevious_constraint = SMTLib.SMT_BoolConst(True)
full_constraint = SMTLib.SMT_And(noPrevious_constraint, cardMask_constraint)
sort.cardinalityMask.put(i, SMTLib.SMT_If(full_constraint, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
newMask.put(i, SMTLib.SMT_If(full_constraint, left_mask.get(i), SMTLib.SMT_IntConst(0)))
delta = left_mask.size()
for i in right_mask.keys():
cardMask_constraint = SMTLib.SMT_EQ(right_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(1))
if newMask.size() != 0:
noPrevious_constraint = SMTLib.SMT_And(*[SMTLib.SMT_Or(SMTLib.SMT_EQ(sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(0)),
SMTLib.SMT_NE(newMask.get(j), right_mask.get(i))) for j in newMask.keys()])
else:
noPrevious_constraint = SMTLib.SMT_BoolConst(True)
full_constraint = SMTLib.SMT_And(noPrevious_constraint, cardMask_constraint)
#constraint = SMTLib.SMT_And(SMTLib.SMT_EQ(right_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(1)),
# *[SMTLib.SMT_Or(SMTLib.SMT_NE(left_mask.get(j), right_mask.get(i)),
# SMTLib.SMT_EQ(left_sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(0))) for j in left_mask.keys()])
sort.cardinalityMask.put(i + delta, SMTLib.SMT_If(full_constraint, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
newMask.put(i+delta, SMTLib.SMT_If(full_constraint, right_mask.get(i), SMTLib.SMT_IntConst(0)))
return (sort, newMask)
'''
def putIfNotMatched(sort, mask, index, value, matches):
'''
Used to make sure you don't add duplicate elements to a set i.e. a sub and super.
Needed by union, intersection, and difference.
'''
if not matches:
mask.put(index, value)
else:
cond = []
for i in matches:
(leftIsSub, transform, (match_sort,match_mask)) = i
if leftIsSub:
if match_mask.get(index + transform):
cond.append(match_sort.isOff(match_mask.get(index + transform)))
else:
if match_mask.get(index - transform):
cond.append(match_sort.isOff(match_mask.get(index - transform)))
if not cond:
mask.put(index, value)
else:
mask.put(index, SMTLib.SMT_If(mAnd(*cond), value, SMTLib.SMT_IntConst(sort.parentInstances)))
def op_union(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~ExprArg`
Computes the set union (left ++ right)
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
if left.getInts() or right.getInts():
sys.exit("FIXME ints union")
matches = getSetInstancePairs(left,right)
newInstances = {}
for (sort,index) in matches.keys():
key = (sort,index)
((lexpr,lpol),(rexpr,rpol)) = matches[(sort,index)]
if rpol == Common.DEFINITELY_OFF and lpol == Common.DEFINITELY_OFF:
continue
else:
new_expr = mOr(lexpr,rexpr)
newInstances[key] = (new_expr, Common.aggregate_polarity(lpol, rpol))
return ExprArg(newInstances)
def int_set_intersection(left_sort, left_mask, right_sort, right_mask):
sys.exit("TODO")
'''
newMask = Mask()
sort = IntSort()
for i in left_mask.keys():
cardMask_constraint = SMTLib.SMT_EQ(left_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(1))
onRight_constraint = SMTLib.SMT_Or(*[SMTLib.SMT_And(SMTLib.SMT_EQ(left_mask.get(i), right_mask.get(j)),
SMTLib.SMT_EQ(right_sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(1))) for j in right_mask.keys()])
if newMask.size() != 0:
noPrevious_constraint = SMTLib.SMT_And(*[SMTLib.SMT_Or(SMTLib.SMT_EQ(sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(0)),
SMTLib.SMT_NE(newMask.get(j), left_mask.get(i))) for j in newMask.keys()])
else:
noPrevious_constraint = SMTLib.SMT_BoolConst(True)
full_constraint = SMTLib.SMT_And(noPrevious_constraint, cardMask_constraint, onRight_constraint)
sort.cardinalityMask.put(i, SMTLib.SMT_If(full_constraint, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
newMask.put(i, SMTLib.SMT_If(full_constraint, left_mask.get(i), SMTLib.SMT_IntConst(0)))
return (sort, newMask)
'''
def op_intersection(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~ExprArg`
Computes the set intersection (left & right)
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
if left.getInts() or right.getInts():
sys.exit("FIXME ints intersection")
matches = getSetInstancePairs(left,right)
newInstances = {}
for (sort,index) in matches.keys():
key = (sort,index)
((lexpr,lpol),(rexpr,rpol)) = matches[(sort,index)]
if rpol == Common.DEFINITELY_OFF or lpol == Common.DEFINITELY_OFF:
continue
else:
new_expr = mAnd(lexpr,rexpr)
newInstances[key] = (new_expr, Common.aggregate_polarity(lpol, rpol))
return ExprArg(newInstances)
def int_set_difference(leftIntSort, rightIntSort):
sys.exit("TODO")
'''
(_,(left_sort, left_mask)) = leftIntSort
(_,(right_sort, right_mask)) = rightIntSort
newMask = Mask()
sort = IntSort()
for i in left_mask.keys():
constraint = SMTLib.SMT_And(SMTLib.SMT_EQ(left_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(1)),
*[SMTLib.SMT_Or(SMTLib.SMT_NE(left_mask.get(i), right_mask.get(j)),
SMTLib.SMT_EQ(right_sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(0))) for j in right_mask.keys()])
sort.cardinalityMask.put(i, SMTLib.SMT_If(constraint, SMTLib.SMT_IntConst(1), SMTLib.SMT_IntConst(0)))
newMask.put(i, SMTLib.SMT_If(constraint, left_mask.get(i), SMTLib.SMT_IntConst(0)))
return (sort, newMask)
'''
def op_difference(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~ExprArg`
    Computes the set difference (left -- right)
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
if left.getInts() or right.getInts():
sys.exit("FIXME ints diff")
matches = getSetInstancePairs(left,right)
newInstances = {}
for (sort,index) in matches.keys():
key = (sort,index)
((lexpr,lpol),(rexpr,rpol)) = matches[(sort,index)]
if rpol == Common.DEFINITELY_ON or lpol == Common.DEFINITELY_OFF:
#cases (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)
continue
elif rpol == Common.DEFINITELY_OFF:
#cases (0 , -1), (1, -1)
newInstances[key] = (lexpr, lpol)
else:
#rpol is unknown, lpol is unknown or on => new_pol is UNKNOWN
#cases (0, 0), (1, 0)
#if right is not on, then left, else sort.isOff
new_expr = SMTLib.SMT_If(SMTLib.createNot(rexpr), lexpr, sort.parentInstances)
newInstances[key] = (new_expr, Common.UNKNOWN)
return ExprArg(newInstances)
def int_set_in(leftIntSort, rightIntSort):
(left_sort, left_mask) = leftIntSort
(right_sort, right_mask) = rightIntSort
cond = []
for i in left_mask.keys():
constraint = SMTLib.SMT_Or(SMTLib.SMT_EQ(left_sort.cardinalityMask.get(i), SMTLib.SMT_IntConst(0)),
SMTLib.SMT_Or(*[SMTLib.SMT_And(SMTLib.SMT_EQ(right_sort.cardinalityMask.get(j), SMTLib.SMT_IntConst(1)),
SMTLib.SMT_EQ(right_mask.get(j), left_mask.get(i))) for j in right_mask.keys()]))
cond.append(constraint)
return(SMTLib.SMT_And(*cond))
def op_in(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
:returns: :class:`~BoolArg`
Ensures that left is a subset of right.
'''
return op_implies(left,right)
def op_nin(left,right):
'''
:param left:
:type left: :class:`~ExprArg`
:param right:
:type right: :class:`~ExprArg`
    :returns: :class:`~BoolArg`
Ensures that left is not a subset of right.
'''
assert isinstance(left, ExprArg)
assert isinstance(right, ExprArg)
expr = op_in(left,right)
return BoolArg(SMTLib.createNot(expr.pop_value()))
def op_domain_restriction(l,r):
sys.exit("Domain Restriction")
def op_range_restriction(l,r):
sys.exit("Range Restriction")
| [
"[email protected]"
] | |
50ffb4956388901f75c430e335b4c03a8493463b | d748710c6c5fa0f61b5bd6c2ec849d9250428811 | /demo1/client_python/test/test_format.py | a03e8a252d7c7c1a5ae10dea8b78b8c22f086cd7 | [] | no_license | stefan2904/aries-experiments | 9f4dab2d0711b76557e3d6ae8e5a27e532102685 | 46f31ee62cf951da2696e5ca4e6dc1d3d753743d | refs/heads/main | 2023-03-23T00:06:06.362992 | 2021-03-18T12:56:58 | 2021-03-18T12:56:58 | 329,986,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # coding: utf-8
"""
(Aries Agent REST Server) of VC4SM University.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.format import Format # noqa: E501
from swagger_client.rest import ApiException
class TestFormat(unittest.TestCase):
"""Format unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFormat(self):
"""Test Format"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.format.Format() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4befa60f75f65fc7d117fd7196c46db4398c2c4c | f99cca94f74c69bc518e298c14140534e18eabd3 | /OrcApi/start_report.py | efcbc2ca473ffe5c686fdeb3c906e7f559f6ecab | [] | no_license | pubselenium/OrcTestToolsKit | d6d838d9937d2c4d86941e317cb3ff096b58e52d | f3ccbbceaed4f4996f6907a2f4880c2fd3f82bbb | refs/heads/master | 2021-04-29T05:15:53.240714 | 2016-12-30T09:42:53 | 2016-12-30T09:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # coding=utf-8
import sys
from flask import make_response
from OrcLib import init_log
from OrcLib import get_config
from OrcApi import app
from OrcApi import orc_api
from OrcApi.Run.ReportApi import ReportDetAPI
configer = get_config("server")
@app.after_request
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@orc_api.representation("text/html")
def out_html(data, code, headers=None):
resp = make_response(data, code)
resp.headers.extend(headers or {})
return resp
# Widget
orc_api.add_resource(ReportDetAPI, '/api/1.0/Report/<string:p_id>/<string:p_time>', endpoint='Report')
driver_host = configer.get_option("REPORT", "ip")
driver_port = configer.get_option("REPORT", "port")
reload(sys)
init_log()
app.run(host=driver_host, port=driver_port)
| [
"[email protected]"
] | |
53cde0b836010d45228fa1c3b0df4ed331fc4563 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_split_frame.py | 7b6184fc5c776b24ad32cc3e4da703f2af126c3c | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,086 | py | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import numpy as np
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_split_frame():
"""
Python API test: h2o.frame.H2OFrame.split_frame(ratios=None, destination_frames=None, seed=None)
"""
python_lists = np.random.uniform(-1,1, (10000,2))
h2oframe = h2o.H2OFrame(python_obj=python_lists)
newframe = h2oframe.split_frame(ratios=[0.5, 0.25], destination_frames=["f1", "f2", "f3"], seed=None)
assert_is_type(newframe, list)
assert_is_type(newframe[0], H2OFrame)
assert len(newframe)==3, "h2o.H2OFrame.split_frame() command is not working."
assert h2oframe.nrow==(newframe[0].nrow+newframe[1].nrow+newframe[2].nrow), "h2o.H2OFrame.split_frame() command " \
"is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_split_frame)
else:
h2o_H2OFrame_split_frame()
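# Editorial note: with ratios=[0.5, 0.25], split_frame is expected to return
# three frames holding roughly 50%, 25% and the remaining ~25% of the rows,
# which is why the assertion above checks that the three row counts sum to
# h2oframe.nrow.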
| [
"[email protected]"
] | |
5953b3b9c01500579a6f297e7f5b22fd87d779c5 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/ebayer/c2/kernel/pae/drivers/module-pae-openafs/actions.py | 5dcbf53a4428541a79d686435aaf1aa6a77d6da8 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
from pisi.actionsapi import kerneltools
KDIR = kerneltools.getKernelVersion()
WorkDir = "openafs-%s" % get.srcVERSION()
def setup():
autotools.configure("--with-linux-kernel-headers=/lib/modules/%s/build" % KDIR)
def build():
autotools.make("-j1 only_libafs")
def install():
for m in ("libafs.ko", "afspag.ko"):
pisitools.insinto("/lib/modules/%s/kernel/extra/openafs" % KDIR, "src/libafs/MODLOAD-%s-SP/%s" % (KDIR, m))
| [
"[email protected]"
] | |
6e00615762e8df542d13ee65b1357bdf9cf232dc | a11984110d22e8231896c7e8bf2c6c2a96e46502 | /Daily Challenges/2020/June/Coin Change 2.py | 11def6bdf3936bba8f8e65cab5a71696240db825 | [] | no_license | Waqar-107/LeetCode | fbd323c89a5ea010b3322b0b35dd087a7744abc4 | 5f7dc48918c0367b20e733830e9807eb40840f77 | refs/heads/master | 2023-08-03T12:27:58.593051 | 2023-07-24T01:33:24 | 2023-07-24T01:33:24 | 220,239,559 | 8 | 7 | null | 2022-05-01T18:50:03 | 2019-11-07T13:08:48 | Python | UTF-8 | Python | false | false | 837 | py | class Solution:
def change(self, amount: int, coins: List[int]) -> int:
n = len(coins)
dp = [[0 for _ in range(n)] for _ in range(amount + 1)]
if amount == 0:
return 1
if n == 0:
return 0
for j in range(n):
dp[0][j] = 1
for i in range(1, amount + 1):
for j in range(n):
# include coin j
if i - coins[j] >= 0:
x = dp[i - coins[j]][j]
else:
x = 0
# do not include j
if j >= 1:
y = dp[i][j - 1]
else:
y = 0
dp[i][j] = x + y
return dp[amount][n - 1]
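# --- Editor's note (added, not part of the original submission) ---
# LeetCode's judge injects `List` from `typing`; running this file standalone would
# additionally need `from typing import List` at the top.
# Hand-checked example of the DP above (hypothetical driver call):
#   Solution().change(5, [1, 2, 5])  ->  4
#   (the four combinations are 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1)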
| [
"[email protected]"
] | |
0a6b8b51c8c6d0be55dbbec18662c723561424b8 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/SendGrid/WebAPI/Profile/__init__.py | 6d92d7a013674c733657f66763ff162be16e38d5 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from UpdateContactProfileEmailAddress import *
from UpdateUsername import *
from ViewAccountProfile import *
from ResetPassword import *
from UpdateAccountProfile import *
| [
"[email protected]"
] | |
4ddb79704d7f95d929525eb9514d2329a0e2ae5f | 9ce4292954000fd66bcdbd0797a280c306308d08 | /quizzes/00.organize.me/Cracking the Coding Interview/10-5.py | b766c70b7b543f695e10a7a7269d68734ca8f968 | [
"MIT"
] | permissive | JiniousChoi/encyclopedia-in-code | 0c786f2405bfc1d33291715d9574cae625ae45be | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | refs/heads/master | 2021-06-27T07:50:10.789732 | 2020-05-29T12:50:46 | 2020-05-29T12:50:46 | 137,426,553 | 2 | 0 | MIT | 2020-10-13T08:56:12 | 2018-06-15T01:29:31 | Python | UTF-8 | Python | false | false | 1,342 | py | '''
10.5 - Given a sorted array of strings that is interspersed with empty strings, write a method to find the location of a given string.
'''
def search_arr_with_empty_string(arr, target):
assert arr
left = init_left(arr)
right = init_right(arr)
mid = get_mid(arr, left, right)
while mid>=0:
if arr[mid]==target:
return mid
if arr[mid]>target:
right=mid
elif arr[mid]<target:
left=mid
else:
assert False
mid = get_mid(arr, left, right)
return -1
def init_left(arr):
for i,e in enumerate(arr):
if e:
return i
    raise Exception("The given array contains only empty strings")
def init_right(arr):
for i in range(len(arr)-1, -1, -1):
if arr[i]:
return i
    raise Exception("The given array contains only empty strings")
def get_mid(arr, left, right):
assert left < right
mid = (left+right)//2
if arr[mid]:
return mid
for t in range(mid-1, left, -1):
if arr[t]:
return t
for t in range(mid+1, right):
if arr[t]:
return t
return -1
sample_arr = ["at","","","","ball","","","car","","","dad","",""]
idx = search_arr_with_empty_string(sample_arr, "ball")
print(idx)
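# Editor's note (added): with sample_arr above, "ball" is found at index 4, so this
# prints 4; a target that is absent (e.g. "cat") makes search_arr_with_empty_string
# return -1.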
| [
"[email protected]"
] | |
dcbab961672293df7685c2b68b386d61314c7e39 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_144/ch149_2020_04_13_20_40_50_766295.py | 878ea953f0527f041b48cc86231d9d5b082aeda2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | salario_bruto = int(input("Salario: "))
numero_dependentes = int(input("Dependentes: "))
contri_INSS = 0
if salario_bruto <= 1045:
contri_INSS = salario_bruto * 0.075
elif salario_bruto >= 1045.01 and salario_bruto <=2089.60:
contri_INSS = salario_bruto * 0.09
elif salario_bruto >= 2089.01 and salario_bruto <= 3134.40:
contri_INSS = salario_bruto * 0.12
elif salario_bruto >= 3134.41 and salario_bruto <=6101.06:
contri_INSS = salario_bruto * 0.14
else:
contri_INSS = 671.12
base = salario_bruto - contri_INSS - (numero_dependentes* 189.59)
aliquota = 0
deducao = 0
if base <= 1903.98:
aliquota = 0
deducao = 0
elif base >= 1903.99 and base <= 2826.65:
    aliquota = 0.075
deducao = 142.80
elif base >= 2826.66 and base <= 3751.05:
aliquota = 0.15
deducao = 354.80
elif base >= 3751.06 and base <= 4664.68:
aliquota = 0.225
deducao = 636.13
else:
aliquota = 0.275
deducao = 869.36
IRRF = base * aliquota - deducao
print(IRRF) | [
"[email protected]"
] | |
584fe41b940e4bd228c969c3f6fe5b68081645b6 | cc0c0f99a5cf563ff52a76f2ac17cdad09d22f01 | /venv/Lib/site-packages/itk/itkQuadEdgeMeshToQuadEdgeMeshFilterPython.py | 9b071e6e20bc0503a4cd1054eb25977024483ead | [] | no_license | Marxss/carck_detect_system | 9c0d338bde322b4c7304fd0addb524d8697c8a7b | d2480f2108052af8af0aa5265a5239c309885043 | refs/heads/master | 2022-04-15T23:34:20.988335 | 2020-03-29T16:24:00 | 2020-03-29T16:24:00 | 214,625,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,466 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkQuadEdgeMeshToQuadEdgeMeshFilterPython', [dirname(__file__)])
except ImportError:
import _itkQuadEdgeMeshToQuadEdgeMeshFilterPython
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython
if fp is not None:
try:
_mod = imp.load_module('_itkQuadEdgeMeshToQuadEdgeMeshFilterPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkQuadEdgeMeshToQuadEdgeMeshFilterPython = swig_import_helper()
del swig_import_helper
else:
import _itkQuadEdgeMeshToQuadEdgeMeshFilterPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkQuadEdgeMeshBasePython
import itkVectorPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import pyBasePython
import vnl_vector_refPython
import itkFixedArrayPython
import itkMapContainerPython
import ITKCommonBasePython
import itkPointPython
import itkImagePython
import itkOffsetPython
import itkSizePython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkCovariantVectorPython
import itkSymmetricSecondRankTensorPython
import itkImageRegionPython
import itkIndexPython
import itkRGBPixelPython
import itkRGBAPixelPython
import itkGeometricalQuadEdgePython
import itkQuadEdgePython
import itkQuadEdgeCellTraitsInfoPython
import itkQuadEdgeMeshPointPython
import itkQuadEdgeMeshLineCellPython
import itkArrayPython
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_New():
return itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3.New()
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_New():
return itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2.New()
class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2(itkQuadEdgeMeshBasePython.itkMeshToMeshFilterQEMD2QEMD2):
"""Proxy of C++ itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer":
"""__New_orig__() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer":
"""Clone(itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2 self) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Clone(self)
__swig_destroy__ = _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.delete_itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2
def cast(obj: 'itkLightObject') -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2 *":
"""cast(itkLightObject obj) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2
Create a new object of the class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2.Clone = new_instancemethod(_itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Clone, None, itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2)
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_swigregister = _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_swigregister
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_swigregister(itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2)
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2___New_orig__() -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer":
"""itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2___New_orig__() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2___New_orig__()
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_cast(obj: 'itkLightObject') -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2 *":
"""itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_cast(itkLightObject obj) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_cast(obj)
class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3(itkQuadEdgeMeshBasePython.itkMeshToMeshFilterQEMD3QEMD3):
"""Proxy of C++ itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer":
"""__New_orig__() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer":
"""Clone(itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3 self) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Clone(self)
__swig_destroy__ = _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.delete_itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3
def cast(obj: 'itkLightObject') -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3 *":
"""cast(itkLightObject obj) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3
Create a new object of the class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3.Clone = new_instancemethod(_itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Clone, None, itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3)
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_swigregister = _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_swigregister
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_swigregister(itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3)
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3___New_orig__() -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer":
"""itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3___New_orig__() -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_Pointer"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3___New_orig__()
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_cast(obj: 'itkLightObject') -> "itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3 *":
"""itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_cast(itkLightObject obj) -> itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3"""
return _itkQuadEdgeMeshToQuadEdgeMeshFilterPython.itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_cast(obj)
def quad_edge_mesh_to_quad_edge_mesh_filter(*args, **kwargs):
"""Procedural interface for QuadEdgeMeshToQuadEdgeMeshFilter"""
import itk
instance = itk.QuadEdgeMeshToQuadEdgeMeshFilter.New(*args, **kwargs)
return instance.__internal_call__()
def quad_edge_mesh_to_quad_edge_mesh_filter_init_docstring():
import itk
import itkTemplate
if isinstance(itk.QuadEdgeMeshToQuadEdgeMeshFilter, itkTemplate.itkTemplate):
quad_edge_mesh_to_quad_edge_mesh_filter.__doc__ = itk.QuadEdgeMeshToQuadEdgeMeshFilter.values()[0].__doc__
else:
quad_edge_mesh_to_quad_edge_mesh_filter.__doc__ = itk.QuadEdgeMeshToQuadEdgeMeshFilter.__doc__
| [
"[email protected]"
] | |
c1825c451ebce3e5a90f216fa0ea0683c035ad0d | 34f6d9a4c4becc057d1b01a0ed3e50f20a071b03 | /main/migrations/0001_initial.py | 55e4c7b7da9e0f6d8dea069a8d80d7bc81e61042 | [] | no_license | hitscanner/WUW | e6d59bb8eae3834cf115e50834a2a4af51c29b29 | 31a482afe3e4789c979696a70f5ded17488b7810 | refs/heads/master | 2022-12-10T06:01:01.862354 | 2019-08-11T11:31:01 | 2019-08-11T11:31:01 | 196,556,732 | 0 | 0 | null | 2022-07-06T20:13:05 | 2019-07-12T10:06:49 | JavaScript | UTF-8 | Python | false | false | 771 | py | # Generated by Django 2.2 on 2019-07-16 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search_result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('poster', models.ImageField(blank=True, upload_to='')),
('heart', models.ImageField(blank=True, upload_to='')),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
cab7d49da20714d35bcfe777d586c4c4b8e8bcb1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqpt/spcmnblk.py | 4a85bd179a0f4e21e2c358232362a472c0383c4d | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,084 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SpCmnBlk(Mo):
"""
A SPROM common block.
"""
meta = ClassMeta("cobra.model.eqpt.SpCmnBlk")
meta.moClassName = "eqptSpCmnBlk"
meta.rnFormat = "spcmn"
meta.category = MoCategory.REGULAR
meta.label = "Sprom Common Block"
meta.writeAccessMask = 0x80080000000001
meta.readAccessMask = 0x80080000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.eqpt.SpromFan")
meta.parentClasses.add("cobra.model.eqpt.SpromLc")
meta.parentClasses.add("cobra.model.eqpt.SpromSup")
meta.parentClasses.add("cobra.model.eqpt.SpromPsu")
meta.parentClasses.add("cobra.model.eqpt.SpromBP")
meta.superClasses.add("cobra.model.eqpt.SpBlkHdr")
meta.rnPrefixes = [
('spcmn', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cksum", "cksum", 3358, PropCategory.REGULAR)
prop.label = "Checksum"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cksum", prop)
prop = PropMeta("str", "clei", "clei", 3375, PropCategory.REGULAR)
prop.label = "CLEI Code"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("clei", prop)
prop = PropMeta("str", "count", "count", 3360, PropCategory.REGULAR)
prop.label = "Block Count"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("count", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "engBits", "engBits", 3372, PropCategory.REGULAR)
prop.label = "Engineering Bits"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("engBits", prop)
prop = PropMeta("str", "hwRevMaj", "hwRevMaj", 3369, PropCategory.REGULAR)
prop.label = "Hardware Revision Major Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("hwRevMaj", prop)
prop = PropMeta("str", "hwRevMin", "hwRevMin", 3370, PropCategory.REGULAR)
prop.label = "Hardware Revision Minor Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("hwRevMin", prop)
prop = PropMeta("str", "len", "len", 3357, PropCategory.REGULAR)
prop.label = "Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("len", prop)
prop = PropMeta("str", "major", "major", 3361, PropCategory.REGULAR)
prop.label = "FRU Major Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("major", prop)
prop = PropMeta("str", "mfgBits", "mfgBits", 3371, PropCategory.REGULAR)
prop.label = "Manufacturing Bits"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("mfgBits", prop)
prop = PropMeta("str", "mfgDev", "mfgDev", 3368, PropCategory.REGULAR)
prop.label = "Manufacturing Deviation"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("mfgDev", prop)
prop = PropMeta("str", "minor", "minor", 3362, PropCategory.REGULAR)
prop.label = "FRU Minor Number"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("minor", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "oem", "oem", 3363, PropCategory.REGULAR)
prop.label = "OEM"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("oem", prop)
prop = PropMeta("str", "pRev", "pRev", 3367, PropCategory.REGULAR)
prop.label = "Part Revision"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("pRev", prop)
prop = PropMeta("str", "pdNum", "pdNum", 3364, PropCategory.REGULAR)
prop.label = "Product Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("pdNum", prop)
prop = PropMeta("str", "prtNum", "prtNum", 3366, PropCategory.REGULAR)
prop.label = "Part Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("prtNum", prop)
prop = PropMeta("str", "pwrCon", "pwrCon", 3373, PropCategory.REGULAR)
prop.label = "Power Consumption"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("pwrCon", prop)
prop = PropMeta("str", "ramFl", "ramFl", 3374, PropCategory.REGULAR)
prop.label = "RMA Failure Code"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ramFl", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "serNum", "serNum", 3365, PropCategory.REGULAR)
prop.label = "Serial Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("serNum", prop)
prop = PropMeta("str", "sig", "sig", 3355, PropCategory.REGULAR)
prop.label = "Signature"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("sig", prop)
prop = PropMeta("str", "size", "size", 3359, PropCategory.REGULAR)
prop.label = "Block Size"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("size", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "vdrId", "vdrId", 3376, PropCategory.REGULAR)
prop.label = "Vendor ID"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("vdrId", prop)
prop = PropMeta("str", "ver", "ver", 3356, PropCategory.REGULAR)
prop.label = "Version"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("ver", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptSlotToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
d64a07214140198e05e8730f69a9a5dd80e5146d | aa3beba7d2e9eb7d4b5b4884f8d11203fe8ebe8e | /historical_bars_pandas_one_time.py | 4b41c3982182d22d010ea68d4e22e4cd423b3b3f | [] | no_license | webclinic017/historical_ticks-1 | 0d3d5113f16624f2191fff366c9e9f6ad2e72f1c | 8a30e6b6ea3c484f6cab95cf4d32fe3dd2695056 | refs/heads/main | 2023-08-05T03:07:59.258809 | 2021-09-13T12:22:59 | 2021-09-13T12:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,089 | py | import pandas as pd
import sys
import csv
import argparse
import datetime
import collections
import inspect
import logging
import os.path
import time
import datetime
from ibapi import wrapper
from ibapi import utils
from ibapi.client import EClient
from ibapi.utils import iswrapper
from ContractSamples import ContractSamples
from ibapi.ticktype import TickType, TickTypeEnum
from ibapi import wrapper
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
# types
from ibapi.common import * # @UnusedWildImport
from ibapi.order import * # @UnusedWildImport
from DBHelperMay import DBHelper
def SetupLogger():
if not os.path.exists("log"):
os.makedirs("log")
time.strftime("pyibapi.%Y%m%d_%H%M%S.log")
recfmt = '(%(threadName)s) %(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d %(message)s'
timefmt = '%y%m%d_%H:%M:%S'
# logging.basicConfig( level=logging.DEBUG,
# format=recfmt, datefmt=timefmt)
logging.basicConfig(filename=time.strftime("log/pyibapi.%y%m%d_%H%M%S.log"),
filemode="w",
level=logging.INFO,
format=recfmt, datefmt=timefmt)
logger = logging.getLogger()
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(console)
def printWhenExecuting(fn):
def fn2(self):
print(" doing", fn.__name__)
fn(self)
print(" done w/", fn.__name__)
return fn2
def printinstance(inst:Object):
attrs = vars(inst)
print(', '.join("%s: %s" % item for item in attrs.items()))
class Activity(Object):
def __init__(self, reqMsgId, ansMsgId, ansEndMsgId, reqId):
self.reqMsdId = reqMsgId
self.ansMsgId = ansMsgId
self.ansEndMsgId = ansEndMsgId
self.reqId = reqId
class RequestMgr(Object):
def __init__(self):
# I will keep this simple even if slower for now: only one list of
# requests finding will be done by linear search
self.requests = []
def addReq(self, req):
self.requests.append(req)
def receivedMsg(self, msg):
pass
# ! [socket_init]
class TestApp(EWrapper, EClient):
def __init__(self):
EWrapper.__init__(self)
EClient.__init__(self, wrapper=self)
# ! [socket_init]
self.nKeybInt = 0
self.started = False
self.nextValidOrderId = None
self.permId2ord = {}
self.globalCancelOnly = False
self.simplePlaceOid = None
self._my_errors = {}
# pandas lines # https://stackoverflow.com/questions/58524845/is-there-a-proper-way-to-produce-a-ohlcv-pandas-dataframe-using-ib-api
# https://stackoverflow.com/questions/62416071/storing-api-data-into-a-dataframe
self.cols = ['date', 'open', 'high', 'low', 'close', 'volume']
self.df = pd.DataFrame(columns=self.cols)
# def dumpReqAnsErrSituation(self):
# logging.debug("%s\t%s\t%s\t%s" % ("ReqId", "#Req", "#Ans", "#Err"))
# for reqId in sorted(self.reqId2nReq.keys()):
# nReq = self.reqId2nReq.get(reqId, 0)
# nAns = self.reqId2nAns.get(reqId, 0)
# nErr = self.reqId2nErr.get(reqId, 0)
# logging.debug("%d\t%d\t%s\t%d" % (reqId, nReq, nAns, nErr))
@iswrapper
# ! [connectack]
def connectAck(self):
if self.asynchronous:
self.startApi()
# ! [connectack]
@iswrapper
# ! [nextvalidid]
def nextValidId(self, orderId: int):
super().nextValidId(orderId)
logging.debug("setting nextValidOrderId: %d", orderId)
self.nextValidOrderId = orderId
print("NextValidId:", orderId)
# ! [nextvalidid]
# we can start now
self.start()
def start(self):
if self.started:
return
self.started = True
if self.globalCancelOnly:
print("Executing GlobalCancel only")
self.reqGlobalCancel()
else:
print("Executing requests")
# self.tickDataOperations_req()
# self.historicalTicksOperations()
# self.reqGlobalCancel()
# self.marketDataTypeOperations()
# self.accountOperations_req()
# self.tickDataOperations_req()
# self.marketDepthOperations_req()
# self.realTimeBarsOperations_req()
self.historicalDataOperations_req()
# self.optionsOperations_req()
# self.marketScannersOperations_req()
# self.fundamentalsOperations_req()
# self.bulletinsOperations_req()
# self.contractOperations()
# self.newsOperations_req()
# self.miscelaneousOperations()
# self.linkingOperations()
# self.financialAdvisorOperations()
# self.orderOperations_req()
# self.rerouteCFDOperations()
# self.marketRuleOperations()
# self.pnlOperations_req()
# self.histogramOperations_req()
# self.continuousFuturesOperations_req()
# self.historicalTicksOperations()
# self.tickByTickOperations_req()
# self.whatIfOrderOperations()
print("Executing requests ... finished")
def keyboardInterrupt(self):
self.nKeybInt += 1
if self.nKeybInt == 1:
self.stop()
else:
print("Finishing test")
self.done = True
def stop(self):
print("Executing cancels")
# self.orderOperations_cancel()
# self.accountOperations_cancel()
# self.tickDataOperations_cancel()
# self.marketDepthOperations_cancel()
# self.realTimeBarsOperations_cancel()
self.historicalDataOperations_cancel()
# self.optionsOperations_cancel()
# self.marketScanners_cancel()
# self.fundamentalsOperations_cancel()
# self.bulletinsOperations_cancel()
# self.newsOperations_cancel()
# self.pnlOperations_cancel()
# self.histogramOperations_cancel()
# self.continuousFuturesOperations_cancel()
# self.tickByTickOperations_cancel()
print("Executing cancels ... finished")
def nextOrderId(self):
oid = self.nextValidOrderId
self.nextValidOrderId += 1
return oid
@iswrapper
# ! [error]
def error(self, reqId: TickerId, errorCode: int, errorString: str):
super().error(reqId, errorCode, errorString)
print("Error. Id:", reqId, "Code:", errorCode, "Msg:", errorString)
errormsg = "IB error id %d errorcode %d string %s" % (reqId, errorCode, errorString)
self._my_errors = errormsg
@iswrapper
def winError(self, text: str, lastError: int):
super().winError(text, lastError)
@printWhenExecuting
def tickByTickOperations_req(self):
# Requesting tick-by-tick data (only refresh)
# ! [reqtickbytick]
self.reqTickByTickData(19001, ContractSamples.EuropeanStock2(), "Last", 0, True)
self.reqTickByTickData(19002, ContractSamples.EuropeanStock2(), "AllLast", 0, False)
self.reqTickByTickData(19003, ContractSamples.EuropeanStock2(), "BidAsk", 0, True)
self.reqTickByTickData(19004, ContractSamples.EurGbpFx(), "MidPoint", 0, False)
# ! [reqtickbytick]
# Requesting tick-by-tick data (refresh + historicalticks)
# ! [reqtickbytickwithhist]
self.reqTickByTickData(19005, ContractSamples.SimpleFuture(), "Last", 10, False)
self.reqTickByTickData(19006, ContractSamples.SimpleFuture(), "AllLast", 10, False)
self.reqTickByTickData(19007, ContractSamples.SimpleFuture(), "BidAsk", 10, False)
self.reqTickByTickData(19008, ContractSamples.SimpleFuture(), "MidPoint", 10, True)
# ! [reqtickbytickwithhist]
@printWhenExecuting
def historicalDataOperations_req(self, num_days = '20 D'):
self.num_days = num_days
queryTime = (datetime.datetime.today() - datetime.timedelta(days=7)).strftime("%Y%m%d %H:%M:%S")
self.reqHistoricalData(4103, ContractSamples.SimpleFuture(), queryTime, self.num_days, "1 day", "TRADES", 1, 1, False, [])
# self.reqHistoricalData(4104, ContractSamples.SimpleFuture(), "",
# "1 M", "1 day", "MIDPOINT", 1, 1, True, [])
# ! [reqhistoricaldata]
def historicalData(self, reqId: int, bar: BarData):
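        # Editor's note: this callback fires once per historical bar; each bar is
        # appended as a row to self.df and the whole DataFrame is rewritten to
        # history2.csv on every call.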
# print("HistoricalData. ReqId:", reqId, "BarData.", bar)
# print(bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
self.df.loc[len(self.df)] = [bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume]
self.df.to_csv('history2.csv')
print(self.df)
# ! [historicaldata]
@printWhenExecuting
def historicalDataOperations_cancel(self):
# ! [cancelHeadTimestamp]
# self.cancelHeadTimeStamp(4101)
# ! [cancelHeadTimestamp]
# ! [cancelHeadTimestamp]
# Canceling historical data requests
# ! [cancelhistoricaldata]
# self.cancelHistoricalData(4102)
self.cancelHistoricalData(4103)
# self.cancelHistoricalData(4104)
# ! [cancelhistoricaldata]
# @iswrapper
# # ! [historicaldataend]
# def historicalDataEnd(self, reqId: int, start: str, end: str):
# super().historicalDataEnd(reqId, start, end)
# print("HistoricalDataEnd. ReqId:", reqId, "from", start, "to", end)
#
# # ! [historicaldataend]
#
# @iswrapper
# # ! [historicalDataUpdate]
# def historicalDataUpdate(self, reqId: int, bar: BarData):
# print("HistoricalDataUpdate. ReqId:", reqId, "BarData.", bar)
#
# # ! [historicalDataUpdate]
#
# def historicalData(self, reqId:int, bar: BarData):
# print("HistoricalData. ReqId:", reqId, "BarData.", bar)
# logging.debug("ReqId:", reqId, "BarData.", bar)
# # self.disconnect()
@iswrapper
def tickPrice(self, tickerId: TickerId , tickType: TickType, price: float, attrib):
super().tickPrice(tickerId, tickType, price, attrib)
print("Tick Price, Ticker Id:", tickerId, "tickType:", TickTypeEnum.to_str(tickType), "Price:", price, " Time:", attrib.time, file=sys.stderr, end= " ")
@iswrapper
def tickSize(self, tickerId: TickerId, tickType: TickType, size: int):
super().tickSize(tickerId, tickType, size)
print( "Tick Size, Ticker Id:",tickerId, "tickType:", TickTypeEnum.to_str(tickType), "Size:", size, file=sys.stderr)
def tickByTickAllLast(self, reqId: int, tickType: int, time: int, price: float,
size: int, tickAttribLast: TickAttribLast, exchange: str,
specialConditions: str):
super().tickByTickAllLast(reqId, tickType, time, price, size, tickAttribLast,
exchange, specialConditions)
if tickType == 1:
print("Last.", end='')
else:
print("AllLast.", end='')
print(" ReqId:", reqId,
"Time:", datetime.datetime.fromtimestamp(time).strftime("%Y%m%d %H:%M:%S"),
"Price:", price, "Size:", size, "Exch:", exchange,
"Spec Cond:", specialConditions, "PastLimit:", tickAttribLast.pastLimit, "Unreported:",
tickAttribLast.unreported)
def main():
SetupLogger()
logging.getLogger().setLevel(logging.ERROR)
cmdLineParser = argparse.ArgumentParser("api tests")
# cmdLineParser.add_option("-c", action="store_True", dest="use_cache", default = False, help = "use the cache")
# cmdLineParser.add_option("-f", action="store", type="string", dest="file", default="", help="the input file")
cmdLineParser.add_argument("-p", "--port", action="store", type=int,
dest="port", default=7497, help="The TCP port to use")
cmdLineParser.add_argument("-C", "--global-cancel", action="store_true",
dest="global_cancel", default=False,
help="whether to trigger a globalCancel req")
args = cmdLineParser.parse_args()
print("Using args", args)
logging.debug("Using args %s", args)
# print(args)
# tc = TestClient(None)
# tc.reqMktData(1101, ContractSamples.USStockAtSmart(), "", False, None)
# print(tc.reqId2nReq)
# sys.exit(1)
try:
app = TestApp()
if args.global_cancel:
app.globalCancelOnly = True
# ! [connect]
app.connect("127.0.0.1", args.port, clientId=3)
# ! [connect]
print("serverVersion:%s connectionTime:%s" % (app.serverVersion(),
app.twsConnectionTime()))
# ! [clientrun]
app.run()
time.sleep(2)
app.disconnect()
# ! [clientrun]
except:
raise
# finally:
# app.dumpTestCoverageSituation()
# app.dumpReqAnsErrSituation()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
877d1c526b3f20bd3188f83451ed138f7a56e486 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_sniggering.py | 17cd2b9a2bb4518486b4ed491fb93307ec1d284d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _SNIGGERING():
def __init__(self,):
self.name = "SNIGGERING"
self.definitions = snigger
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['snigger']
| [
"[email protected]"
] | |
ac7098eb210e84d502ecbef2178e0288d257fa61 | 010215c1421f5275a846e7154189b22cdd3c89bc | /MS/Two Pointer/backspace_compare.py | b930b074d11fe2ed02fd6c809b5f4f8223bf58ac | [] | no_license | bsextion/CodingPractice_Py | ab54d5715298645a8fd7ab6945bf3b22d4e6a874 | da2847a04705394c32a6fe1b5f6c6b64c24647a3 | refs/heads/master | 2023-08-16T17:14:47.643989 | 2021-09-28T19:23:40 | 2021-09-28T19:23:40 | 383,658,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | def backspace_compare(str1:str, str2):
#two pointers,
ptr_one = 0
ptr_two = 0
while ptr_one < len(str1):
        if str1[ptr_one] == '#' and ptr_one > 0:
temp = list(str1)
temp[ptr_one-1] = ''
temp[ptr_one] = ''
str1 = ''.join(temp)
ptr_one += 1
while ptr_two < len(str2):
        if str2[ptr_two] == '#' and ptr_two > 0:
temp = list(str2)
temp[ptr_two - 1] = ''
temp[ptr_two] = ''
str2 = ''.join(temp)
ptr_two += 1
if str1 == str2:
return True
return False
backspace_compare("xp#", "xyz##")
| [
"[email protected]"
] | |
b6677daaa5433a1a5bff104dbd781005f9caa6ad | bdba52c756cc09f192b720ea318510c265665dcd | /swagger_client/models/get_characters_character_id_planets_planet_id_head.py | d6daf4f3b6cdc6f78e96cc5957b82e43bb1a5d74 | [
"MIT"
] | permissive | rseichter/bootini-star | 6b38195890f383615cc2b422c365ac28c5b87292 | a80258f01a05e4df38748b8cb47dfadabd42c20d | refs/heads/master | 2020-03-14T03:17:11.385048 | 2018-06-28T17:23:23 | 2018-06-28T17:23:23 | 131,416,504 | 0 | 0 | MIT | 2018-05-01T14:26:04 | 2018-04-28T14:28:46 | Python | UTF-8 | Python | false | false | 5,555 | py | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdPlanetsPlanetIdHead(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'head_id': 'int',
'latitude': 'float',
'longitude': 'float'
}
attribute_map = {
'head_id': 'head_id',
'latitude': 'latitude',
'longitude': 'longitude'
}
def __init__(self, head_id=None, latitude=None, longitude=None): # noqa: E501
"""GetCharactersCharacterIdPlanetsPlanetIdHead - a model defined in Swagger""" # noqa: E501
self._head_id = None
self._latitude = None
self._longitude = None
self.discriminator = None
self.head_id = head_id
self.latitude = latitude
self.longitude = longitude
@property
def head_id(self):
"""Gets the head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
head_id integer # noqa: E501
:return: The head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: int
"""
return self._head_id
@head_id.setter
def head_id(self, head_id):
"""Sets the head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead.
head_id integer # noqa: E501
:param head_id: The head_id of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: int
"""
if head_id is None:
raise ValueError("Invalid value for `head_id`, must not be `None`") # noqa: E501
if head_id is not None and head_id > 9: # noqa: E501
raise ValueError("Invalid value for `head_id`, must be a value less than or equal to `9`") # noqa: E501
if head_id is not None and head_id < 0: # noqa: E501
raise ValueError("Invalid value for `head_id`, must be a value greater than or equal to `0`") # noqa: E501
self._head_id = head_id
@property
def latitude(self):
"""Gets the latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
latitude number # noqa: E501
:return: The latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: float
"""
return self._latitude
@latitude.setter
def latitude(self, latitude):
"""Sets the latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead.
latitude number # noqa: E501
:param latitude: The latitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: float
"""
if latitude is None:
raise ValueError("Invalid value for `latitude`, must not be `None`") # noqa: E501
self._latitude = latitude
@property
def longitude(self):
"""Gets the longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
longitude number # noqa: E501
:return: The longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:rtype: float
"""
return self._longitude
@longitude.setter
def longitude(self, longitude):
"""Sets the longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead.
longitude number # noqa: E501
:param longitude: The longitude of this GetCharactersCharacterIdPlanetsPlanetIdHead. # noqa: E501
:type: float
"""
if longitude is None:
raise ValueError("Invalid value for `longitude`, must not be `None`") # noqa: E501
self._longitude = longitude
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdPlanetsPlanetIdHead):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
c4f36d4b7fa6d6a4966f1a22288517df1842a6e4 | 480e33f95eec2e471c563d4c0661784c92396368 | /Configuration/Generator/python/QCD_Pt-20toInf_MuEnrichedPt15_TuneCUETP8M1_13TeV_pythia8_cff.py | 7ca22905e2e7cd1528d55e528353b0c20e2ccb2d | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 2,277 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(0.00042),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(7.20648e+08),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:limitCylinder = on',
'ParticleDecays:xyMax = 2000',
'ParticleDecays:zMax = 4000',
'HardQCD:all = on',
'PhaseSpace:pTHatMin = 20',
'130:mayDecay = on',
'211:mayDecay = on',
'321:mayDecay = on'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
mugenfilter = cms.EDFilter("MCSmartSingleParticleFilter",
MinPt = cms.untracked.vdouble(15.,15.),
MinEta = cms.untracked.vdouble(-2.5,-2.5),
MaxEta = cms.untracked.vdouble(2.5,2.5),
ParticleID = cms.untracked.vint32(13,-13),
Status = cms.untracked.vint32(1,1),
# Decay cuts are in mm
MaxDecayRadius = cms.untracked.vdouble(2000.,2000.),
MinDecayZ = cms.untracked.vdouble(-4000.,-4000.),
MaxDecayZ = cms.untracked.vdouble(4000.,4000.)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision$'),
name = cms.untracked.string('\$Source$'),
annotation = cms.untracked.string('QCD dijet production, pThat > 20 GeV, with INCLUSIVE muon preselection (pt(mu) > 15 GeV), 13 TeV, TuneCUETP8M1')
)
ProductionFilterSequence = cms.Sequence(generator*mugenfilter)
| [
"[email protected]"
] | |
ba2213f9f76fd58af80066c34ca0933cca61dfbe | 8b2b497069ed3db150e15863559dc0e9a44dc8c1 | /pure_protobuf/io/url.py | 3e2004fb24b0f96f02a7ce5efe77d39b2a72b5a2 | [
"MIT"
] | permissive | eigenein/protobuf | 2aec2c544cf9f6571b161b1e62ec3675a5b141eb | cf14bc702302c9334c7c9cc839b0b24334a725ef | refs/heads/master | 2023-08-31T21:23:29.258800 | 2023-08-27T12:00:26 | 2023-08-28T12:36:25 | 1,890,285 | 216 | 20 | MIT | 2023-09-13T12:58:54 | 2011-06-13T18:26:55 | Python | UTF-8 | Python | false | false | 619 | py | """Reading and writing parsed URLs."""
from typing import IO, Iterator
from urllib.parse import ParseResult, urlparse, urlunparse
from pure_protobuf.interfaces.read import Read
from pure_protobuf.interfaces.write import Write
from pure_protobuf.io.bytes_ import read_string, write_string
class ReadUrl(Read[ParseResult]):
__slots__ = ()
def __call__(self, io: IO[bytes]) -> Iterator[ParseResult]:
yield urlparse(read_string(io))
class WriteUrl(Write[ParseResult]):
__slots__ = ()
def __call__(self, value: ParseResult, io: IO[bytes]) -> None:
write_string(urlunparse(value), io)
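
# --- Editor's addition: hedged round-trip sketch, not part of the library. ---
# Assumes ReadUrl/WriteUrl take no constructor arguments and that read_string and
# write_string are symmetric length-delimited helpers, as their use above suggests.
if __name__ == "__main__":
    from io import BytesIO

    buffer = BytesIO()
    WriteUrl()(urlparse("https://example.com/search?q=protobuf"), buffer)
    buffer.seek(0)
    print(next(ReadUrl()(buffer)))  # expected: the same URL parsed back to a ParseResult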
| [
"[email protected]"
] | |
aee56c11569ff5461d903c1776810a2242a2f6ce | 12c15c7ae150acaf8032f444db24440da2234b1a | /ComputerVision/Projects/cv20_proj1/lap.py | f91c55cde634d08ca8ca04d68cc2380417508879 | [] | no_license | Jimut123/rkmveri-labs | 315ecd4607af72dd0851489e427a3ab09a8009ff | be19a453ea32460c454e3443798e3d8954fb084b | refs/heads/master | 2023-02-02T17:11:23.641187 | 2020-12-13T18:35:20 | 2020-12-13T18:35:20 | 201,784,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | import numpy as np
import cv2
cutoff_frequency = 4
filter = cv2.getGaussianKernel(ksize=cutoff_frequency*4+1,
sigma=cutoff_frequency)
filter = np.dot(filter, filter.T)
def del2(M):
dx = 1
dy = 1
rows, cols = M.shape
dx = dx * np.ones ((1, cols - 1))
dy = dy * np.ones ((rows-1, 1))
mr, mc = M.shape
D = np.zeros ((mr, mc))
if (mr >= 3):
## x direction
## left and right boundary
D[:, 0] = (M[:, 0] - 2 * M[:, 1] + M[:, 2]) / (dx[:,0] * dx[:,1])
D[:, mc-1] = (M[:, mc - 3] - 2 * M[:, mc - 2] + M[:, mc-1]) \
/ (dx[:,mc - 3] * dx[:,mc - 2])
## interior points
tmp1 = D[:, 1:mc - 1]
tmp2 = (M[:, 2:mc] - 2 * M[:, 1:mc - 1] + M[:, 0:mc - 2])
tmp3 = np.kron (dx[:,0:mc -2] * dx[:,1:mc - 1], np.ones ((mr, 1)))
D[:, 1:mc - 1] = tmp1 + tmp2 / tmp3
if (mr >= 3):
## y direction
## top and bottom boundary
D[0, :] = D[0,:] + \
(M[0, :] - 2 * M[1, :] + M[2, :] ) / (dy[0,:] * dy[1,:])
D[mr-1, :] = D[mr-1, :] \
+ (M[mr-3,:] - 2 * M[mr-2, :] + M[mr-1, :]) \
/ (dy[mr-3,:] * dx[:,mr-2])
## interior points
tmp1 = D[1:mr-1, :]
tmp2 = (M[2:mr, :] - 2 * M[1:mr - 1, :] + M[0:mr-2, :])
tmp3 = np.kron (dy[0:mr-2,:] * dy[1:mr-1,:], np.ones ((1, mc)))
D[1:mr-1, :] = tmp1 + tmp2 / tmp3
return D / 4
print(del2(filter))
| [
"[email protected]"
] | |
c89b9794cbf2b7f1b847fd4e611f0c42b5aa35fa | 4771ca5cd2c7be8e6d0a50f1e0b1f85a17ec5efd | /todos/forms.py | 51e119676bbd8af1823a81a48e4933eac9090377 | [] | no_license | luanfonceca/todomvc-django-over-the-wire | 03aa2e57c04d465c56cf06e1c95b417c502bcbad | ae1b6e989c0c9edd7d4f8de2d9553bf57e4e1e38 | refs/heads/main | 2023-03-03T19:23:35.849691 | 2021-02-07T13:23:17 | 2021-02-07T13:23:17 | 334,795,276 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py |
from django import forms
from todos.models import ToDo
class ToDoForm(forms.ModelForm):
class Meta:
model = ToDo
fields = ('title',)
class CompleteToDoForm(forms.ModelForm):
class Meta:
model = ToDo
fields = ('is_completed',)
| [
"[email protected]"
] | |
2212a2b636017e168a6d9d41201b0c3c70163ac9 | 057d662a83ed85897e9906d72ea90fe5903dccc5 | /Comprehension.py | 68427686ee2e27abe1a3290558f7865fd4fd49bb | [] | no_license | Karishma00/AnsiblePractice | 19a4980b1f6cca7b251f2cbea3acf9803db6e016 | 932558d48869560a42ba5ba3fb72688696e1868a | refs/heads/master | 2020-08-05T00:05:31.679220 | 2019-10-04T13:07:29 | 2019-10-04T13:07:29 | 212,324,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #
squares = [x**2 for x in range(0, 10)]
print(squares)
#another ex
celcius=[0,10,30,90]
fahrenheiet = [((9/5) * temp + 32) for temp in celcius]
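# with celcius = [0, 10, 30, 90] and Python 3 division, this prints [32.0, 50.0, 86.0, 194.0]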
print(fahrenheiet) | [
"[email protected]"
] | |
1d806f235836b0b15c4c2615bbf176dff8458479 | 82be2ebd50fef5b359cfbcacd21f38da4c383ffc | /tests/test_writer.py | a340f79117bfab8297d6e6b0fb63a8be472e2988 | [
"BSD-3-Clause"
] | permissive | isabella232/helium-commander | 5eae81b89cccf2dae56a4163815d867777387288 | 58d1fe4064c51beccbff7a0d93bf037fffdac370 | refs/heads/master | 2021-06-15T15:16:00.139651 | 2017-02-28T23:22:36 | 2017-02-28T23:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | from helium_commander import Sensor, DataPoint
from itertools import islice
import pytest
def validate_format(output, client, sensors, capsys):
first_sensor = sensors[0]
# With sort
Sensor.display(client, sensors, format=output, sort='name')
out, err = capsys.readouterr()
assert first_sensor.short_id in out
Sensor.display(client, sensors, format=output, sort='name', reverse=True)
reversed, err = capsys.readouterr()
assert reversed != out
# Without sort
Sensor.display(client, sensors, format=output)
out, err = capsys.readouterr()
assert first_sensor.short_id in out
Sensor.display(client, sensors, format=output, reverse=True)
reversed, err = capsys.readouterr()
assert reversed != out
def test_formats(client, sensors, capsys):
for output in ['csv', 'tabular', 'json']:
validate_format(output, client, sensors, capsys)
with pytest.raises(AttributeError):
Sensor.display(client, sensors, format='xxx')
def test_timeseries(client, authorized_organization):
points = islice(authorized_organization.timeseries(), 10)
DataPoint.display(client, points, max_width=20)
| [
"[email protected]"
] | |
102a94ec2318f2e1673fd0e494380451db909578 | 0e7aed5eef2e1d132a7e75dd8f439ae76c87639c | /python/652_find_duplicated_subtrees.py | ee4a673bdc3ecbf54bdd00a403e289703d72c886 | [
"MIT"
] | permissive | liaison/LeetCode | 2a93df3b3ca46b34f922acdbc612a3bba2d34307 | bf03743a3676ca9a8c107f92cf3858b6887d0308 | refs/heads/master | 2022-09-05T15:04:19.661298 | 2022-08-19T19:29:19 | 2022-08-19T19:29:19 | 52,914,957 | 17 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,429 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# set of all node strings
node_str_set = set()
duplicated_strs = set()
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_set
nonlocal duplicated_strs
nonlocal duplicated_nodes
if node is None:
return ""
left_str = node2str(node.left)
right_str = node2str(node.right)
node_str = str(node.val) + "(" + left_str + ")" + "(" + right_str + ")"
if node_str in node_str_set:
if node_str not in duplicated_strs:
duplicated_strs.add(node_str)
duplicated_nodes.append(node)
else:
node_str_set.add(node_str)
return node_str
node2str(root)
return duplicated_nodes
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class SolutionCount:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# node_str -> count
node_str_count = defaultdict(int)
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_count
nonlocal duplicated_nodes
if node is None:
return ""
node_str = "{}({})({})".format(
node.val, node2str(node.left), node2str(node.right))
node_str_count[node_str] += 1
if node_str_count[node_str] == 2:
duplicated_nodes.append(node)
return node_str
node2str(root)
return duplicated_nodes
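# --- Editor's note (added, hedged usage sketch) ---
# For LeetCode's example tree [1,2,3,4,null,2,4,null,null,4], either class returns the
# roots of the duplicated subtrees [2,4] and [4] (order may vary), e.g.:
#   duplicates = SolutionCount().findDuplicateSubtrees(root)  # `root` built from TreeNode objects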
| [
"[email protected]"
] | |
15fd9952fee0476a4522d0e9c5220985962185cf | 88abc8645e499a61e96e2979ae6092e98bfd09e7 | /streamz/utils.py | 4e1538da7c4506a9bf7fed145c07d2eb9fdde2bc | [
"BSD-3-Clause"
] | permissive | vishalbelsare/streamz | 5e2d6e112b6a2a90e396c4e3bc11cb1167d879e3 | b73a8c4c5be35ff1dae220daaefbfd2bfa58e0a1 | refs/heads/master | 2022-12-24T17:28:40.600327 | 2022-11-22T16:40:35 | 2022-11-22T16:40:35 | 207,001,623 | 0 | 0 | BSD-3-Clause | 2022-12-10T04:20:03 | 2019-09-07T17:20:32 | Python | UTF-8 | Python | false | false | 1,184 | py | _method_cache = {}
class methodcaller(object):
"""
Return a callable object that calls the given method on its operand.
Unlike the builtin `operator.methodcaller`, instances of this class are
serializable
"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
| [
"[email protected]"
] | |
3a084bb437dc7e9fbb08e486b1cc9993909d21bb | 71d535545c4f3b2fc626cd04cfcee22805b67353 | /copacity_app/migrations/0007_auto_20210613_1019.py | c0f52b9073d682a55e6c58ee766848aa894fabd7 | [] | no_license | mcnalj/copacity_django | 01a018d32ee9cb9ba392e5dcd160d636ba0b5b74 | 48432cff7585af342599c06cac497947e4b68195 | refs/heads/master | 2023-07-04T14:27:50.736252 | 2021-08-10T16:53:59 | 2021-08-10T16:53:59 | 383,779,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | # Generated by Django 3.1.7 on 2021-06-13 10:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('copacity_app', '0006_checkin_owner'),
]
operations = [
migrations.CreateModel(
name='Circle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('createdBy', models.CharField(max_length=50)),
('createdOn', models.DateTimeField(auto_now_add=True)),
('adminId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CircleMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('circle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='copacity_app.circle')),
('inviter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='circle_invites', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='circle',
name='members',
field=models.ManyToManyField(related_name='circle_member', through='copacity_app.CircleMembership', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
ef954e4bc9bcc4a0ca428034f6427da6e1577c8f | 07da31b260bf2949ffd9463ad4f777ca93b75d43 | /sleekforum/src/sleekapps/threads/views/post/post.py | 6f81800803801b5318b4dba53439f620da360d57 | [] | no_license | adepeter/sleek-docker | 134fd7de12ade8c521ceb8e1b2b2611fa2224dde | dcf010c3da53093600101d970c6888c82360209f | refs/heads/master | 2022-12-15T14:53:01.499098 | 2020-09-14T00:42:31 | 2020-09-14T00:42:31 | 282,499,689 | 0 | 0 | null | 2020-07-31T14:31:22 | 2020-07-25T18:12:19 | JavaScript | UTF-8 | Python | false | false | 1,459 | py | from django.contrib import messages
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.utils.translation import gettext_lazy as _
from ...forms.post.post import PostEditForm, PostForm
from ...viewmixins.post import BasePostMixin
TEMPLATE_URL = 'threads/post'
class EditPost(BasePostMixin, UpdateView):
form_class = PostEditForm
template_name = f'{TEMPLATE_URL}/edit_post.html'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def form_valid(self, form):
if not form.has_changed():
messages.success(self.request, _('No changes were made to your reply'))
else:
messages.success(self.request, _('Post was successfully edited.'))
return super().form_valid(form)
class DeletePost(BasePostMixin, DeleteView):
pass
class ReplyPost(BasePostMixin, CreateView):
form_class = PostForm
template_name = f'{TEMPLATE_URL}/reply_post.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['parent'] = self.get_object()
return context
def form_valid(self, form):
parent_object = self.get_object()
form.instance.thread = parent_object.thread
form.instance.parent = parent_object
form.instance.user = self.request.user
return super().form_valid(form) | [
"[email protected]"
] | |
418ff5b81b82739dbb020083e568e2276627c16e | fdaba69f8d3ae3e645cb548a31111814b67f88bc | /credit/xgboost_Chunk_join.py | 3d222a1d0c5720a80dc1aa63bfc2f49b7910ada3 | [] | no_license | curryli/pandasFlow | 6c381a06843f353f3449666cc9aee3e3fc2c3620 | 891963e1d9acd8cdd23732180a3fd4b4633bc335 | refs/heads/master | 2020-12-07T15:24:36.500075 | 2018-07-01T09:01:09 | 2018-07-01T09:01:09 | 95,520,789 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,752 | py | # -*- coding: utf-8 -*-
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import recall_score, precision_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import datetime
from collections import Counter
from xgboost.sklearn import XGBClassifier
import numpy as np
start_time = datetime.datetime.now()
#################################################
#reader = pd.read_csv("new_FE_idx.csv", low_memory=False, iterator=True)
#reader = pd.read_csv("trans_small.csv", low_memory=False, iterator=True)
reader = pd.read_csv("cert_all_right.csv", low_memory=False, iterator=True)
loop = True
chunkSize = 100000
chunks = []
i = 0
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
if (i%5)==0:
print i
i = i+1
except StopIteration:
loop = False
print "Iteration is stopped."
df_All = pd.concat(chunks, ignore_index=True)
print df_All.columns
#df_All = df_All.drop(["Trans_at","hist_fraud_cnt"], axis=1,inplace=False)
df_All = df_All[(df_All["label"] == 0) | (df_All["label"] == 1)]
df_All_stat = pd.read_csv("train_1108.csv", sep=',')
df_All_stat = df_All_stat[(df_All_stat["label"]==0) | (df_All_stat["label"]==1)]
df_All_stat= df_All_stat.drop( ["label"], axis=1,inplace=False)
df_All = pd.merge(left=df_All, right=df_All_stat, how='left', left_on='certid', right_on='certid')
df_All = shuffle(df_All)
df_All = df_All.fillna(-1)
df_X = df_All.drop(["label","certid","card_no"], axis=1,inplace=False)
df_y = df_All[["certid","label"]]
X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2)
np.savetxt("X_train_cols.csv",np.array(X_train.columns),fmt="%s" )
###############################################
certid_test = y_test
y_train = y_train.drop(["certid"], axis=1,inplace=False)
y_test = y_test.drop(["certid"], axis=1,inplace=False)
clf = XGBClassifier(learning_rate =0.1,n_estimators=500,max_depth=5,gamma=0.05,subsample=0.8,colsample_bytree=0.8,objective= 'binary:logistic', reg_lambda=1,seed=27)
print "start training"
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
cm1=confusion_matrix(y_test,pred)
print cm1
print "For Trans:\n"
result = precision_recall_fscore_support(y_test,pred)
#print result
precision_0 = result[0][0]
recall_0 = result[1][0]
f1_0 = result[2][0]
precision_1 = result[0][1]
recall_1 = result[1][1]
f1_1 = result[2][1]
print "precision_0: ", precision_0," recall_0: ", recall_0, " f1_0: ", f1_0
#print "certid_test_ori\n",certid_test
certid_test.index = range(certid_test.shape[0])
#print "certid_test\n",certid_test
certid_pred = pd.DataFrame(pred,columns=["pred"])
#print "certid_pred\n", certid_pred
certid_DF = pd.concat([certid_test,certid_pred], axis=1, ignore_index=True)
certid_DF.columns = ["certid","label","pred"]
#print "certid_DF\n",certid_DF
print certid_DF.dtypes
certid_DF.to_csv("certid_DF_drop.csv")
certid_grouped = certid_DF.groupby([certid_DF['certid']])
#certid_grouped = certid_DF.groupby([certid_DF['certid']], as_index=False)
# def label_cnt(arr): # the prediction value that occurs most often for the same person
#     cnt_set = Counter(arr)
#     max_cnt_pair = cnt_set.most_common(1)[0]  # (maxitem, maxcount)
#     return max_cnt_pair[0]
def label_cnt(arr):  # person-level vote: return 0 if any of this person's transactions was predicted 0, otherwise 1
cnt_0 = 0
arr_values = arr.values
for i in range(len(arr_values)):
if arr_values[i]==float(0):
cnt_0 = cnt_0+1
if(cnt_0>0):
return 0
else:
return 1
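# Aggregate the transaction-level predictions up to person level: group rows by
# certid and reduce each person's predictions with label_cnt defined above.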
agg_dict = {}
agg_dict["pred"] = [label_cnt]
agg_stat_df = certid_grouped.agg(agg_dict)
agg_stat_df.columns = agg_stat_df.columns.map('{0[0]}-{0[1]}'.format)
#https://www.cnblogs.com/hhh5460/p/7067928.html
agg_stat_df.reset_index(level=0, inplace=True)
#print agg_stat_df
pred_label_DF = agg_stat_df[["certid", "pred-label_cnt"]]
true_label_DF = certid_test.drop_duplicates()
compare_df = pd.merge(left=true_label_DF, right=pred_label_DF, how='left', left_on='certid', right_on='certid')
y_test = compare_df["label"]
pred = compare_df["pred-label_cnt"]
cm2=confusion_matrix(y_test,pred)
print cm2
print "For Person:\n"
result = precision_recall_fscore_support(y_test,pred)
#print result
precision_0 = result[0][0]
recall_0 = result[1][0]
f1_0 = result[2][0]
precision_1 = result[0][1]
recall_1 = result[1][1]
f1_1 = result[2][1]
print "precision_0: ", precision_0," recall_0: ", recall_0, " f1_0: ", f1_0
end_time = datetime.datetime.now()
delta_time = str((end_time-start_time).total_seconds())
print "cost time",delta_time,"s"
| [
"[email protected]"
] | |
4b3187694d3f43ef8b7ee834c64e18fad6e4b5d3 | 2290eed5c494202beea0da1b9257a38b7a4403d2 | /script/[662]二叉树最大宽度.py | 382e8af49c05465745fefb5b155a1be322b7d57b | [] | no_license | DSXiangLi/Leetcode_python | 4b1c9848ea774955fb252b9bd796ba8d46ad728e | a2ef0ba5e86405dbf68dbc1ffeb086c7d864db1d | refs/heads/main | 2022-09-01T04:34:04.260402 | 2022-08-20T01:12:27 | 2022-08-20T01:12:27 | 445,347,891 | 1 | 0 | null | 2022-07-23T06:32:14 | 2022-01-07T00:15:20 | Python | UTF-8 | Python | false | false | 2,352 | py | # 给定一个二叉树,编写一个函数来获取这个树的最大宽度。树的宽度是所有层中的最大宽度。这个二叉树与满二叉树(full binary tree)结构相同,但一些节
# 点为空。
#
# 每一层的宽度被定义为两个端点(该层最左和最右的非空节点,两端点间的null节点也计入长度)之间的长度。
#
# 示例 1:
#
#
# 输入:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# 输出: 4
# 解释: 最大值出现在树的第 3 层,宽度为 4 (5,3,null,9)。
#
#
# 示例 2:
#
#
# 输入:
#
# 1
# /
# 3
# / \
# 5 3
#
# 输出: 2
# 解释: 最大值出现在树的第 3 层,宽度为 2 (5,3)。
#
#
# 示例 3:
#
#
# 输入:
#
# 1
# / \
# 3 2
# /
# 5
#
# 输出: 2
# 解释: 最大值出现在树的第 2 层,宽度为 2 (3,2)。
#
#
# 示例 4:
#
#
# 输入:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# 输出: 8
# 解释: 最大值出现在树的第 4 层,宽度为 8 (6,null,null,null,null,null,null,7)。
#
#
# 注意: 答案在32位有符号整数的表示范围内。
# Related Topics 树 深度优先搜索 广度优先搜索 二叉树 👍 384 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:
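        # BFS with positional indexing: a node at (virtual) index `pos` of a complete
        # binary tree level has children at 2*pos and 2*pos+1, so the width of a level
        # is rightmost pos - leftmost pos + 1 even when intermediate nodes are null.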
maxw = 0
stack = [(root,0)]
while stack:
l = len(stack)
left = 0
for i in range(l):
node, pos = stack.pop(0)
if i==0:
left= pos
if node.left:
stack.append((node.left, pos*2))
if node.right:
stack.append((node.right, pos*2+1))
if i==l-1:
maxw = max(maxw, pos-left+1)
return maxw
# leetcode submit region end(Prohibit modification and deletion)
| [
"[email protected]"
] | |
ea41bf74cd26d0892bb73965c448beea182cf8f0 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnconveyanc.py | 8b322095af1ce66ac5cee14556ce11e1e57f9dc2 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 158 | py | ii = [('ClarGE2.py', 7), ('AinsWRR3.py', 1), ('ClarGE.py', 2), ('WadeJEB.py', 6), ('HaliTBC.py', 1), ('MereHHB2.py', 1), ('ClarGE3.py', 2), ('DibdTRL.py', 4)] | [
"[email protected]"
] | |
68a8b7bca0c433c9063cdb4726ee5fc8ce83c752 | 48aacf0425c5ab071972034c3fbd388feb036578 | /node-7/site-packages/ceph_deploy/connection.py | 381f6afa4c9bd0454eb2cda6a9881067950e2c18 | [] | no_license | wputra/MOS-centos | 2b8ec0116bb3a28632c54d6052d322a42391439f | 0a4f24dd4183d4d44e8c7beb27adce12e42f0201 | refs/heads/master | 2021-01-10T19:22:22.920342 | 2014-09-12T03:33:54 | 2014-09-12T03:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from ceph_deploy.lib.remoto import Connection
from sudo_pushy import needs_sudo # TODO move this to utils once pushy is out
def get_connection(hostname, logger, threads=5):
"""
A very simple helper, meant to return a connection
that will know about the need to use sudo.
"""
try:
return Connection(
hostname,
logger=logger,
sudo=needs_sudo(),
threads=threads,
)
except Exception as error:
msg = "connecting to host: %s " % hostname
errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
raise RuntimeError(msg + errors)
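# Usage sketch (hostname and logger below are illustrative only):
#   import logging
#   conn = get_connection('mon0', logger=logging.getLogger('ceph_deploy'))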
| [
"[email protected]"
] | |
f54066fc82d29ccbcbb0f6fbc82e0b625fe67fb5 | ad0857eaba945c75e705594a53c40dbdd40467fe | /leetCode/maximal_rectangle.py | 98b030c3831e8cf2db830fb6e04f0209fe45fc5d | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 927 | py | class Solution(object):
def maximal_rectangle(self, matrix):
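        # Row-by-row histogram technique: heights[i] counts the consecutive '1's ending
        # at the current row in column i, and the stack-based "largest rectangle in a
        # histogram" scan below finds the best rectangle that uses that row as its base.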
if not matrix or not matrix[0]:
return 0
width = len(matrix[0])
heights = [0] * (width+1)
ans = 0
for row in matrix:
for i in range(width):
heights[i] = heights[i] + 1 if row[i] == '1' else 0
stack = [-1]
for i in range(width+1):
while heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i - 1 - stack[-1]
ans = max(ans, h * w)
stack.append(i)
return ans
if __name__ == "__main__":
sol = Solution()
print(sol.maximal_rectangle([["1", "0", "1", "0", "0"],
["1", "0", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "0", "1", "1", "0"]]))
| [
"[email protected]"
] | |
ac7a8bc1157a39c61201db61f88be40f9b180771 | 7cf52b987da6595ebc5f763b384b03e608ccb25f | /tests/index/test_mongodb_index.py | 87a66da4180d9cc8e832f22eb992d389b7dee1c3 | [] | no_license | shaypal5/dinglebop | 93fdfda48ec4d91c0a9485173a106d1edbcd1b29 | a10473b4abecfd70a00cd9086aa8919a404959c9 | refs/heads/master | 2021-08-20T10:17:46.640046 | 2017-11-14T18:01:43 | 2017-11-14T18:01:43 | 109,569,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,710 | py | """Testing the implementation of MongoDB-based dingle indexes."""
import pytest
from dinglebop.index.mongodb import MongoDBIndex
from dinglebop.shared import get_dinglebop_cfg
SAMPLE_IDEN1 = 'school_data_2016'
SAMPLE_IDEN2 = 'school_data_2017'
SAMPLE_DOC1 = {'identifier': SAMPLE_IDEN1, 'version': 'v1.0',
'store': 'somestore', 'format_identifier': 'arrow'}
SAMPLE_DOC2 = {'identifier': SAMPLE_IDEN1, 'version': 'v1.1',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOC3 = {'identifier': SAMPLE_IDEN2, 'version': 'v0.03',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOC4 = {'identifier': SAMPLE_IDEN2, 'version': 'v0.23',
'store': 'somestore', 'format_identifier': 'csv'}
SAMPLE_DOCS = [SAMPLE_DOC1, SAMPLE_DOC2, SAMPLE_DOC3, SAMPLE_DOC4]
def _get_mongodb_idx_instance():
dcfg = get_dinglebop_cfg()
idx_cfg = dcfg['dingles']['dinglebop_test']['index'].copy()
assert idx_cfg.pop('type') == 'MongoDB'
return MongoDBIndex(**idx_cfg)
def _get_idx_collection():
return _get_mongodb_idx_instance()._get_collection()
@pytest.fixture(scope="session", autouse=True)
def reset_idx_collection():
idx_obj = _get_mongodb_idx_instance()
collection = idx_obj._get_collection()
if MongoDBIndex._INDEX_NAME in collection.index_information():
collection.drop_index(MongoDBIndex._INDEX_NAME)
collection.delete_many({})
collection.insert_many([d.copy() for d in SAMPLE_DOCS])
def test_mongodb_index_autocreation():
idx_collection = _get_idx_collection()
assert MongoDBIndex._INDEX_NAME in idx_collection.index_information()
def test_get_all_dataset_entries():
dingle_idx = _get_mongodb_idx_instance()
cursor = dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1)
docs = list(cursor)
assert len(docs) == 2
assert docs[0]['version'] == 'v1.1'
assert docs[1]['version'] == 'v1.0'
def test_get_latest_dataset_entry():
dingle_idx = _get_mongodb_idx_instance()
doc1 = dingle_idx.get_latest_dataset_entry(identifier=SAMPLE_IDEN1)
assert doc1['version'] == 'v1.1'
doc2 = dingle_idx.get_latest_dataset_entry(identifier=SAMPLE_IDEN2)
assert doc2['version'] == 'v0.23'
def test_get_dataset_entry_by_version():
dingle_idx = _get_mongodb_idx_instance()
doc = dingle_idx.get_dataset_entry_by_version(
identifier=SAMPLE_IDEN1, version='v1.0')
assert doc['format_identifier'] == 'arrow'
@pytest.fixture(scope='function')
def clear_all_idx_docs():
collection = _get_idx_collection()
collection.delete_many({})
def test_add_entry(clear_all_idx_docs):
dingle_idx = _get_mongodb_idx_instance()
dingle_idx.add_entry(**SAMPLE_DOC1)
dingle_idx.add_entry(**SAMPLE_DOC2)
docs = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs) == 2
@pytest.fixture(scope='function')
def add_all_idx_docs():
collection = _get_idx_collection()
collection.delete_many({})
collection.insert_many([d.copy() for d in SAMPLE_DOCS])
def test_remove_entries(add_all_idx_docs):
dingle_idx = _get_mongodb_idx_instance()
docs1 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs1) == 2
docs2 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN2))
assert len(docs2) == 2
dingle_idx.remove_entries(identifier=SAMPLE_IDEN1, version='v1.0')
docs1 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN1))
assert len(docs1) == 1
dingle_idx.remove_entries(identifier=SAMPLE_IDEN2)
docs2 = list(dingle_idx.get_all_dataset_entries(identifier=SAMPLE_IDEN2))
assert len(docs2) == 0
| [
"[email protected]"
] | |
9f5159c90657e4c81d5b418e21cacd21836d48a7 | 0f3a0be642cd6a2dd792c548cf7212176761e9b1 | /pywps_services/r_spreadpath.py | afb9e04098aba8613b3ae1e979cf639f4e50b450 | [] | no_license | huhabla/wps-grass-bridge | 63a5d60735d372e295ec6adabe527eec9e72635a | aefdf1516a7517b1b745ec72e2d2481a78e10017 | refs/heads/master | 2021-01-10T10:10:34.246497 | 2014-01-22T23:40:58 | 2014-01-22T23:40:58 | 53,005,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | # ################################################ #
# This process was generated using GrassXMLtoPyWPS #
# Author: Soeren Gebbert #
# Mail: soerengebbert <at> googlemail <dot> com #
# ################################################ #
from pywps.Process import WPSProcess
from PyWPSGrassModuleStarter import PyWPSGrassModuleStarter
class r_spreadpath(WPSProcess):
def __init__(self):
WPSProcess.__init__(self, identifier = 'r.spreadpath', title = 'Recursively traces the least cost path backwards to cells from which the cumulative cost was determined.', version = 1, statusSupported = True, storeSupported = True, metadata = [{'type': 'simple', 'title': 'raster'}, {'type': 'simple', 'title': 'fire'}, {'type': 'simple', 'title': 'cumulative costs'}], abstract = 'http://grass.osgeo.org/grass70/manuals/html70_user/r.spreadpath.html')
# Literal and complex inputs
self.addComplexInput(identifier = 'x_input', title = 'Name of raster map containing back-path easting information', minOccurs = 1, maxOccurs = 1, formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'image/png'}, {'mimeType': 'image/gif'}, {'mimeType': 'image/jpeg'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
self.addComplexInput(identifier = 'y_input', title = 'Name of raster map containing back-path northing information', minOccurs = 1, maxOccurs = 1, formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'image/png'}, {'mimeType': 'image/gif'}, {'mimeType': 'image/jpeg'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
self.addLiteralInput(identifier = 'coordinate', title = 'The map E and N grid coordinates of starting points', minOccurs = 0, maxOccurs = 1024, type = type("string"), allowedValues = '*')
self.addLiteralInput(identifier = '-v', title = 'Run verbosely', minOccurs = 0, maxOccurs = 1, type = type(True), default = False, allowedValues = [True, False])
self.addLiteralInput(identifier = 'grass_resolution_ns', title = 'Resolution of the mapset in north-south direction in meters or degrees', abstract = 'This parameter defines the north-south resolution of the mapset in meter or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
self.addLiteralInput(identifier = 'grass_resolution_ew', title = 'Resolution of the mapset in east-west direction in meters or degrees', abstract = 'This parameter defines the east-west resolution of the mapset in meters or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
self.addLiteralInput(identifier = 'grass_band_number', title = 'Band to select for processing (default is all bands)', abstract = 'This parameter defines band number of the input raster files which should be processed. As default all bands are processed and used as single and multiple inputs for raster modules.', minOccurs = 0, maxOccurs = 1, type = type(0), allowedValues = '*')
# complex outputs
self.addComplexOutput(identifier = 'output', title = 'Name of spread path raster map', formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
def execute(self):
starter = PyWPSGrassModuleStarter()
starter.fromPyWPS("r.spreadpath", self.inputs, self.outputs, self.pywps)
if __name__ == "__main__":
process = r_spreadpath()
process.execute()
| [
"soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202"
] | soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202 |
634e12c0e89842b519a5bae4fcff0bcc9f6bc466 | 6e19835f99efea46d7b7966144efa8e2302d5e4c | /tensorflow/python/autograph/utils/misc_test.py | c813e0f5c96386a0d0fbd078bd5b663c688b0327 | [
"Apache-2.0"
] | permissive | Cincan/tensorflow | 415fba147ef4676901f424a839d751aa7d1c50f0 | 94c9acddd9f3fd73a5e4b5bc1fd7c9284a68ea75 | refs/heads/master | 2020-04-08T14:07:14.355697 | 2018-11-28T00:59:40 | 2018-11-28T00:59:40 | 159,422,705 | 1 | 0 | Apache-2.0 | 2018-11-28T01:08:35 | 2018-11-28T01:08:35 | null | UTF-8 | Python | false | false | 1,719 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for misc module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils.misc import alias_tensors
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops.variables import Variable
from tensorflow.python.platform import test
class MiscTest(test.TestCase):
def test_alias_single_tensor(self):
a = constant(1)
new_a = alias_tensors(a)
self.assertFalse(new_a is a)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
def test_alias_tensors(self):
a = constant(1)
v = Variable(2)
s = 'a'
l = [1, 2, 3]
new_a, new_v, new_s, new_l = alias_tensors(a, v, s, l)
self.assertFalse(new_a is a)
self.assertTrue(new_v is v)
self.assertTrue(new_s is s)
self.assertTrue(new_l is l)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
d3bb18f8490dbe42f2945f71dc53ab3f6ba81073 | 012aadc12dc2a4560eabc04527414c3883e87e3d | /myvenv/bin/autopep8 | b7dcecda5a6c5b3d815b73491659a8d14741a669 | [] | no_license | kosiannpann/my-first-blog | a0c17286256e0d16a90b40b6b2f9beddebe9b03e | e41f4966da20785cabb9402e02a4119fb981fee1 | refs/heads/master | 2023-06-09T04:19:02.276691 | 2021-07-04T02:52:22 | 2021-07-04T02:52:22 | 376,177,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | #!/Users/ootadaiki/djangogirls/myvenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from autopep8 import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a1169142ea4526aa901d36823d53da96429542b2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02898/s602471427.py | 314d28278090b7657613ada6c2c07aa32f78faee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | from sys import stdin
import sys
import math
from functools import reduce
import functools
import itertools
from collections import deque,Counter,defaultdict
from operator import mul
import copy
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
import heapq
sys.setrecursionlimit(10**6)
# INF = float("inf")
INF = 10**18
import bisect
import statistics
mod = 10**9+7
# mod = 998244353
N, K = map(int, input().split())
h = list(map(int, input().split()))
ans = 0
for i in range(N):
if h[i] >= K:
ans += 1
print(ans) | [
"[email protected]"
] | |
df7b698115c3ffbcc37de296d1f45a03dd270d4e | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Autumn18/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py | 76c72b7101248b1ce6344a581378b039f338ce9b | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 1,467 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/38EB0BD1-6F82-A44F-BF83-86E69D8B150E.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/3A2A6249-6A8F-D24F-A36F-4C441E9A6DF1.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/4041B441-D1EF-534F-B6BB-C2C07AB51940.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/795C52C1-CEAD-7F44-9D3B-8737D8AC54DE.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/BF2BF1E5-ECC7-9042-A2A8-B906E018E1F2.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-1300_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/30000/FBE70B20-508A-984A-9CBF-95601BA7E965.root',
] )
| [
"[email protected]"
] | |
68554203dcadc071481784c3c5bb2aa165f03998 | f5ffd566166948c4202eb1e66bef44cf55a70033 | /openapi_client/model/array_of_groups.py | 49b6ca340c93633e7253f9ed715d5e9b8508b2ce | [] | no_license | skyportal/skyportal_client | ed025ac6d23589238a9c133d712d4f113bbcb1c9 | 15514e4dfb16313e442d06f69f8477b4f0757eaa | refs/heads/master | 2023-02-10T02:54:20.757570 | 2021-01-05T02:18:03 | 2021-01-05T02:18:03 | 326,860,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,650 | py | """
Fritz: SkyPortal API
SkyPortal provides an API to access most of its underlying functionality. To use it, you will need an API token. This can be generated via the web application from your profile page or, if you are an admin, you may use the system provisioned token stored inside of `.tokens.yaml`. ### Accessing the SkyPortal API Once you have a token, you may access SkyPortal programmatically as follows. #### Python ```python import requests token = 'ea70a5f0-b321-43c6-96a1-b2de225e0339' def api(method, endpoint, data=None): headers = {'Authorization': f'token {token}'} response = requests.request(method, endpoint, json=data, headers=headers) return response response = api('GET', 'http://localhost:5000/api/sysinfo') print(f'HTTP code: {response.status_code}, {response.reason}') if response.status_code in (200, 400): print(f'JSON response: {response.json()}') ``` #### Command line (curl) ```shell curl -s -H 'Authorization: token ea70a5f0-b321-43c6-96a1-b2de225e0339' http://localhost:5000/api/sysinfo ``` ### Response In the above examples, the SkyPortal server is located at `http://localhost:5000`. In case of success, the HTTP response is 200: ``` HTTP code: 200, OK JSON response: {'status': 'success', 'data': {}, 'version': '0.9.dev0+git20200819.84c453a'} ``` On failure, it is 400; the JSON response has `status=\"error\"` with the reason for the failure given in `message`: ```js { \"status\": \"error\", \"message\": \"Invalid API endpoint\", \"data\": {}, \"version\": \"0.9.1\" } ``` # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 0.9.dev0+git20201221.76627dd
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.group import Group
globals()['Group'] = Group
class ArrayOfGroups(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('status',): {
'SUCCESS': "success",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'status': (str,), # noqa: E501
'message': (str,), # noqa: E501
'data': ([Group],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'status': 'status', # noqa: E501
'message': 'message', # noqa: E501
'data': 'data', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ArrayOfGroups - a model defined in OpenAPI
Args:
Keyword Args:
status (str): defaults to "success", must be one of ["success", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
message (str): [optional] # noqa: E501
data ([Group]): [optional] # noqa: E501
"""
status = kwargs.get('status', "success")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.status = status
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"[email protected]"
] | |
0bf8b725ddbfa47071048214793a8fd56f8a68d9 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV.py | 2ae9510b0d691b63af6e04ec0a6f9d672271926b | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,130 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV.json')
def test_storage_encoding_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1GvgQwPwo8ZdYojFrQyjs1QtjRKjn52cbV(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| [
"[email protected]"
] | |
8f05bd2092972f6b401e756e15c2117a31a5a4ba | ad69b52951c2f80d152b9ce2225b9a588f110deb | /fan_element_struct.py | 66a7720d1a15cb86b7b4a0c70052200350ac8318 | [] | no_license | hailangzz/fan_health_program | 47c70fe884ec8e28b20be63f99d5c3004bb2a261 | 137d8a1a2271a44c68fe5a5b2b4e367023c0efad | refs/heads/master | 2020-03-19T16:03:13.442179 | 2018-06-09T07:47:34 | 2018-06-09T07:47:34 | 136,698,139 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,210 | py | #coding=utf-8
import numpy as np
import copy
def fan_element_struct():
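    # Build the empty nested dictionaries used to accumulate per-turbine statistics
    # (status codes, faults, ten-minute-log distributions and the reference power curve)
    # and return (fan_root_dict, fan_element, stames_code).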
    # Turbine status codes are not fixed yet, so they are added dynamically later; used to build the
    # status-code frequency distribution of a single turbine. 'stames_alive_list' is the list of alive
    # records for the different turbine statuses.
stames_code={'stamescode_number':0,'stamescode_time':0,'reduce_power':0,'stames_alive_list':[]}
    # Turbine faults and the count of their occurrences
error={'uiHubErr':{'HubErr_code':{},'starttime':[]},
'uiErrFir':{'ErrFir_code':{},'starttime':[]},
'uiConErr':{'ConErr_code':{},'starttime':[]},
'uiYawErr':{'YawErr_code':{},'starttime':[]},
'uiWarFir':{'WarFir_code':{},'starttime':[]}
}
#windspeed_array=[set_wind_cut for set_wind_cut in np.arange(3,20,0.1)]
    # Store the turbine's normal power-curve data; from it one can derive the wind-condition
    # probability distribution and the total generation under different wind conditions.
normal_power_curve={}
windspeed_array=np.arange(3,20,0.2)
for wind_cut in windspeed_array:
if wind_cut not in normal_power_curve:
normal_power_curve[round(wind_cut,1)]={'total_power':0,'registe_number':0,'poweravg':0}
hzth_standard_wind_power={}
hzth_power_list=[123,142,164,189,213,239,268,300,331,366,398,434,470,514,552,593,630,661,707,742,806,843,893,953,1001,1049,1095,1147,1204,1248,1293,
1353,1398,1428,1465,1481,1493,1501,1514,1528,1540,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,
1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552,1552
]
windspeed_array=np.arange(3,20,0.2)
for wind_cut_id in range(len(windspeed_array)):
if windspeed_array[wind_cut_id] not in hzth_standard_wind_power:
hzth_standard_wind_power[round(windspeed_array[wind_cut_id],1)]={'poweravg':0}
hzth_standard_wind_power[round(windspeed_array[wind_cut_id],1)]['poweravg']=hzth_power_list[wind_cut_id]
    # Used to store the frequency distribution of all turbine output power values
power_status_distribute={}
power_status=np.arange(0,1800,10)
for power_cut in power_status:
if power_cut not in power_status_distribute:
power_status_distribute[power_cut]={'registe_number':0}
    # Used to store the frequency distribution of the wind conditions at the turbine
wind_status_distribute={}
wind_array=np.arange(0,20,0.2)
for wind_cut in wind_array:
if wind_cut not in wind_status_distribute:
wind_status_distribute[round(wind_cut,1)]={'registe_number':0}
fChoGenTemAve_status_distribute={}
temperature1_cut=np.arange(0,200,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fChoGenTemAve_status_distribute:
fChoGenTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGeaBeaTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGeaBeaTemAve_status_distribute:
fGeaBeaTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGeaOilTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGeaOilTemAve_status_distribute:
fGeaOilTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGenTemAve_status_distribute={}
temperature1_cut=np.arange(0,200,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGenTemAve_status_distribute:
fGenTemAve_status_distribute[temp1_cut]={'registe_number':0}
fGenBeaDriTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fGenBeaDriTemAve_status_distribute:
fGenBeaDriTemAve_status_distribute[temp1_cut]={'registe_number':0}
fConGsclgbTemAve_status_distribute={}
temperature1_cut=np.arange(0,150,2)
for temp1_cut in temperature1_cut:
if temp1_cut not in fConGsclgbTemAve_status_distribute:
fConGsclgbTemAve_status_distribute[temp1_cut]={'registe_number':0}
    tenminlog={'wind_status_distribute':{},# wind-condition frequency distribution
               'power_status_distribute':{},# normal-power frequency distribution
               'fChoGenTemAve_distribute':{},# generator winding (choke) temperature frequency distribution
               'fGeaBeaTemAve_distribute':{},# gearbox bearing temperature frequency distribution
               'fGeaOilTemAve_distribute':{},# gearbox oil temperature frequency distribution
'fGenTemAve_distribute':{},
'fGenBeaDriTemAve_distribute':{},
'fConGsclgbTemAve_distribute':{},
               'normal_power_splat':{'wind_list':[],'power_list':[]},# scatter of (wind speed, power) during normal operation
               'all_power_splat':{'wind_list':[],'power_list':[]},# scatter of (wind speed, power) for all samples
               'selflimite_power_splat':{'wind_list':[],'power_list':[]},# scatter of (wind speed, power) during over-temperature power limiting
'limite_power_splat':{'wind_list':[],'power_list':[]},
'stop_power_splat':{'wind_list':[],'power_list':[]},
               # over-temperature power-limiting statistics
'over_temperature':{'fChoGenTemAve':{'number':0,'total_time':0},
'fGeaBeaTemAve':{'number':0,'total_time':0},
'fGeaOilTemAve':{'number':0,'total_time':0},
'fGenTemAve':{'number':0,'total_time':0},
'fGenBeaDriTemAve':{'number':0,'total_time':0},
'fConGsclgbTemAve':{'number':0,'total_time':0}
},
               'totalpower':0,# total energy generated by the turbine
               'normal_totalpower':0,# total energy generated during normal operation
'selflimite_totaltime':0,
'limite_totaltime':0,
'stop_totaltime':0,
'over_temperature_totaltime':0,
'hzth_increase_totalpower':0,
               'selflimite_reducepower':0, # energy generation lost to power limiting
'limite_reducepower':0,
'stop_reducepower':0,
'fChoGenTemAve':{'registe_id':[],'temperature':[]},
'fGeaBeaTemAve':{'registe_id':[],'temperature':[]},
'fGeaOilTemAve':{'registe_id':[],'temperature':[]},
'fGenTemAve':{'registe_id':[],'temperature':[]},
'fGenBeaDriTemAve':{'registe_id':[],'temperature':[]},
'fConGsclgbTemAve':{'registe_id':[],'temperature':[]}
               # probability-distribution statistics of the component temperature data
}
    # Initialize the 'tenminlog' structure variable
tenminlog['wind_status_distribute']=copy.deepcopy(wind_status_distribute)
tenminlog['power_status_distribute']=copy.deepcopy(power_status_distribute)
tenminlog['fChoGenTemAve_distribute']=copy.deepcopy(fChoGenTemAve_status_distribute)
tenminlog['fGeaBeaTemAve_distribute']=copy.deepcopy(fGeaBeaTemAve_status_distribute)
tenminlog['fGeaOilTemAve_distribute']=copy.deepcopy(fGeaOilTemAve_status_distribute)
tenminlog['fGenTemAve_distribute']=copy.deepcopy(fGenTemAve_status_distribute)
tenminlog['fGenBeaDriTemAve_distribute']=copy.deepcopy(fGenBeaDriTemAve_status_distribute)
tenminlog['fConGsclgbTemAve_distribute']=copy.deepcopy(fConGsclgbTemAve_status_distribute)
fan_element={'stames':{},'error':{},'tenminlog':{},'normal_power_curve':{},'fanset_information':{'fanid':0,'fanname':'','fanip':'','fantype':0,'plctype':0}}
fan_element['error']=copy.deepcopy(error)
fan_element['tenminlog']=copy.deepcopy(tenminlog)
fan_element['normal_power_curve']=copy.deepcopy(normal_power_curve)
fan_element['hzth_standard_wind_power']=copy.deepcopy(hzth_standard_wind_power)
fan_root_dict={}
return fan_root_dict,fan_element,stames_code
| [
"[email protected]"
] | |
0c8d931e83ca07c53fe67e67250f3e7cb9fb37c8 | 09dbc3b3ecf116eda30b039c641913b63aecb991 | /turbustat/data_reduction/data_reduc.py | 41e624b91294e59713b677f5f9150537dbd1d512 | [
"MIT"
] | permissive | keflavich/TurbuStat | 2a3e2a891933046074adb6a20f93977ad136e750 | a6fac4c0d10473a74c62cce4a9c6a30773a955b1 | refs/heads/master | 2021-01-18T06:30:06.717191 | 2014-07-04T00:05:27 | 2014-07-04T00:05:27 | 21,695,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,244 | py | # Licensed under an MIT open source license - see LICENSE
'''
Data Reduction Routines for PPV data cubes
'''
import numpy as np
from scipy import ndimage as nd
from operator import itemgetter
from itertools import groupby
from astropy.io import fits
import copy
from scipy.optimize import curve_fit
from astropy.convolution import convolve
class property_arrays(object):
'''
Create property arrays from a data cube
Creates centroid (moment 1), integrated intensity, velocity dispersion (moment 2), total intensity (moment 0)
'''
def __init__(self, cube, clip_level = 3,rms_noise = None, kernel_size=None, save_name=None):
super(property_arrays, self).__init__()
self.cube = cube[0]#cube.data
self.header = cube[1]#cube.header
self.array_shape = (self.cube.shape[1],self.cube.shape[2])
self.save_name = save_name
self.clean_cube = np.ones(self.cube.shape)
self.noise_array = None
self.nan_mask = np.invert(np.isnan(self.cube), dtype=bool)
self.weight_cube = np.ones(self.cube.shape)
for i in range(self.cube.shape[1]):
for j in range(self.cube.shape[2]):
self.weight_cube[:,i,j] = np.arange(1,self.cube.shape[0]+1,1)
self.sigma = None
self.property_dict = {}
if rms_noise != None:
if isinstance(rms_noise, float):
self.noise_type_flag = 1
self.sigma = rms_noise
self.noise_array = np.ones(self.array_shape) * self.sigma
self.noise_mask = np.ones(self.array_shape)
self.clean_cube[self.cube < (clip_level * self.sigma)] = 0.0
self.clean_cube *= np.ma.masked_invalid(self.cube)
else:
self.noise_type_flag = 2
self.clean_cube, self.noise_array, self.sigma = given_noise_cube(self.cube, rms_noise, clip_level)
self.noise_mask = self.noise_array < (clip_level * self.sigma)
else:
if not kernel_size:
raise ValueError("Kernel Size must be given for moment masking.")
self.noise_type_flag = 0
self.clean_cube, self.mask_cube, self.sigma = moment_masking(self.cube, clip_level, kernel_size)
# self.noise_mask = self.noise_array < (clip_level * self.sigma)
self.nan_mask += self.mask_cube
def moment0(self):
moment0_array = np.sum(self.clean_cube * self.nan_mask, axis=0)
# moment0_array *= self.noise_mask
error_array = self.sigma * np.sqrt(np.sum(self.nan_mask * (self.clean_cube>0), axis=0))
# error_array *= self.noise_mask
self.property_dict["moment0"] = moment0_array, error_array
return self
def centroid(self):
centroid_array = np.sum(self.clean_cube * self.nan_mask * self.weight_cube, axis=0) / self.property_dict["moment0"][0]
# centroid_array *= self.noise_mask
first_err_term = self.sigma**2. * np.sqrt(np.sum(self.weight_cube[np.nonzero(self.clean_cube * self.nan_mask)], axis=0)) / self.property_dict["moment0"][0]**2.
second_err_term = self.property_dict["moment0"][1]**2. / self.property_dict["moment0"][0]**2.
error_array = np.sqrt(first_err_term + second_err_term)
# error_array *= self.noise_mask
self.property_dict["centroid"] = centroid_array, error_array
return self
def integrated_intensity(self):
masked_clean = self.clean_cube * self.nan_mask
int_intensity_array = np.ones(self.array_shape)
error_array = np.ones(self.array_shape)
for i in range(self.array_shape[0]):
for j in range(self.array_shape[1]):
z = np.where(masked_clean[:,i,j]>0)
continuous_sections = []
for _, g in groupby(enumerate(z[0]), lambda (i,x): i-x):
continuous_sections.append(map(itemgetter(1), g))
try:
integrating_section = max(continuous_sections, key=len)
int_intensity_array[i,j] = np.sum([masked_clean[k,i,j] for k in integrating_section])
error_array[i,j] = (np.sqrt(len(integrating_section)))**-1. * self.sigma
except ValueError:
int_intensity_array[i,j] = np.NaN
error_array[i,j] = np.NaN
self.property_dict["int_int"] = int_intensity_array, error_array
return self
def linewidth(self):
masked_clean = self.clean_cube * self.nan_mask
weight_clean = self.weight_cube * self.nan_mask
linewidth_array = np.empty(self.array_shape)
error_array = np.empty(self.array_shape)
for i in range(self.array_shape[0]):
for j in range(self.array_shape[1]):
linewidth_array[i,j] = np.sqrt(np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2. * masked_clean[:,i,j]) / \
self.property_dict["moment0"][0][i,j])
first_err_term = (2 * np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j]) * masked_clean[:,i,j]) * self.property_dict["centroid"][1][i,j]**2. +\
self.sigma**2. * np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2.)) / \
np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2. * masked_clean[:,i,j])**2.
second_err_term = self.sigma**2. * np.sum(self.nan_mask[:,i,j])**2. / self.property_dict["moment0"][0][i,j]**2.
error_array[i,j] = np.sqrt(first_err_term + second_err_term)
self.property_dict["linewidth"] = linewidth_array, error_array
def pixel_to_physical_units(self):
if np.abs(self.header["CDELT3"])> 1: ## Lazy check to make sure we have units of km/s
vel_pix_division = np.abs(self.header["CDELT3"])/1000.
reference_velocity = self.header["CRVAL3"]/1000.
else:
vel_pix_division = np.abs(self.header["CDELT3"])
reference_velocity = self.header["CRVAL3"]
## Centroid error needs to be recalculated when changing to physical units
physical_weights = (np.sum(self.weight_cube, axis=0) * vel_pix_division) + \
reference_velocity - (vel_pix_division * self.header["CRPIX3"])
first_err_term = self.sigma**2. * np.sqrt(np.sum(physical_weights * (self.clean_cube>0) * self.nan_mask, axis=0)) / self.property_dict["moment0"][0]**2.
second_err_term = self.property_dict["moment0"][1]**2. / self.property_dict["moment0"][0]**2.
cent_error_array = np.sqrt(first_err_term + second_err_term)
# cent_error_array *= self.noise_mask
self.property_dict["centroid"] = (self.property_dict["centroid"][0] * vel_pix_division) + \
reference_velocity - (vel_pix_division * self.header["CRPIX3"]), \
cent_error_array
self.property_dict["int_int"] = (self.property_dict["int_int"][0] * vel_pix_division, \
self.property_dict["int_int"][1] * vel_pix_division)
self.property_dict["linewidth"] = (self.property_dict["linewidth"][0] * vel_pix_division, \
self.property_dict["linewidth"][1] * vel_pix_division)
return self
def save_fits(self, save_path=None):
new_hdr = copy.deepcopy(self.header)
del new_hdr["NAXIS3"],new_hdr["CRVAL3"],new_hdr["CRPIX3"],new_hdr['CDELT3'], new_hdr['CTYPE3']
new_hdr.update("NAXIS",2)
new_err_hdr = copy.deepcopy(new_hdr)
if self.save_name is None:
self.save_name = self.header["OBJECT"]
moment0_specs = {'comment': "= Image of the Zeroth Moment", 'BUNIT': 'K', 'name': 'moment0'}
centroid_specs = {'comment': "= Image of the First Moment", 'BUNIT': 'km/s', 'name': 'centroid'}
linewidth_specs = {'comment': "= Image of the Second Moment", 'BUNIT': 'km/s', 'name': 'linewidth'}
int_int_specs = {'comment': "= Image of the Integrated Intensity", 'BUNIT': 'K km/s', 'name': 'integrated_intensity'}
moment0_error_specs = {'comment': "= Image of the Zeroth Moment Error", 'BUNIT': 'K', 'name': 'moment0'}
centroid_error_specs = {'comment': "= Image of the First Moment Error", 'BUNIT': 'km/s', 'name': 'centroid'}
linewidth_error_specs = {'comment': "= Image of the Second Moment Error", 'BUNIT': 'km/s', 'name': 'linewidth'}
int_int_error_specs = {'comment': "= Image of the Integrated Intensity Error", 'BUNIT': 'K km/s', 'name': 'integrated_intensity'}
for prop_array in self.property_dict.keys():
if prop_array=='moment0':
specs = moment0_specs
specs_error = moment0_error_specs
elif prop_array=='centroid':
specs = centroid_specs
specs_error = centroid_error_specs
elif prop_array=='int_int':
specs = int_int_specs
specs_error = int_int_error_specs
elif prop_array=='linewidth':
specs = linewidth_specs
specs_error = linewidth_error_specs
if save_path!=None:
filename = "".join([save_path, self.save_name, ".", specs["name"], ".fits"])
filename_err = "".join([save_path, self.save_name, ".", specs["name"], "_error.fits"])
else:
filename = "".join([self.save_name, ".", specs["name"], ".fits"])
filename_err = "".join([self.save_name, ".", specs["name"], "_error.fits"])
## Update header for array and the error array
new_hdr.update("BUNIT",value=specs['BUNIT'],comment='')
new_hdr.add_comment(specs["comment"])
new_err_hdr.update("BUNIT",value=specs['BUNIT'],comment='')
new_err_hdr.add_comment(specs["comment"])
fits.writeto(filename,self.property_dict[prop_array][0],new_hdr)
fits.writeto(filename_err,self.property_dict[prop_array][1],new_hdr)
## Reset the comments
del new_hdr["COMMENT"]
del new_err_hdr["COMMENT"]
return self
def return_all(self, save=True, physical_units=True, continuous_boundary=True, save_path=None):
self.moment0()
self.centroid()
self.linewidth()
self.integrated_intensity()
if physical_units:
self.pixel_to_physical_units()
if continuous_boundary:
for prop_array in self.property_dict.keys():
pass
if save:
self.save_fits(save_path = None)
return self
def given_noise_cube(data_cube, noise_cube, clip_level):
if data_cube.shape!=noise_cube.shape:
raise ValueError("Error array has different dimensions.")
    assert isinstance(clip_level, int)
noise_cube[np.where(noise_cube==0)] = np.NaN
clipped_cube = (data_cube/noise_cube) >= clip_level
    inv_cube = np.invert(clipped_cube)
noise_array = np.max(inv_cube*data_cube,axis=0)
sigma = np.mean(noise_array)
return clipped_cube * data_cube, noise_array, sigma
def __sigma__(data_cube, clip_level):
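    # Estimate the rms noise: histogram all finite cube values and fit a Gaussian;
    # the fitted width (p[1]) is returned as the noise sigma.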
flat_cube = np.ravel(data_cube[~np.isnan(data_cube)])
hist, bins = np.histogram(flat_cube, bins = int(len(flat_cube)/100.))
centres = (bins[:-1]+bins[1:])/2
def gaussian(x,*p):
# Peak Height is p[0],Sigma is p[1],Mu is p[2]
return p[0]*np.exp(-1*np.power(x-p[2],2) / (2*np.power(p[1],2)))
p0 = (np.max(hist), 1.0, centres[np.argmax(hist)])
opts, cov = curve_fit(gaussian, centres, hist, p0, maxfev=(100*len(hist))+1)
if opts[1] == p0[1]:
print "Fitting Failed. Sigma is %s" % (opts[1])
return opts[1]
def moment_masking(data_cube, clip_level, kernel_size):
sigma_orig = __sigma__(data_cube, clip_level)
if np.isnan(data_cube).any():
print "Using astropy to convolve over nans"
kernel = gauss_kern(kernel_size, ysize=kernel_size, zsize=kernel_size)
smooth_cube = convolve(data_cube, kernel, normalize_kernel=True)
else:
smooth_cube = nd.gaussian_filter(data_cube, kernel_size, mode="mirror")
sigma_smooth = __sigma__(smooth_cube, clip_level)
mask_cube = smooth_cube > (clip_level * sigma_smooth)
dilate_struct = nd.generate_binary_structure(3,3)
mask_cube = nd.binary_dilation(mask_cube, structure=dilate_struct)
noise_cube = np.invert(mask_cube, dtype=bool) * data_cube
# noise_array = np.max(noise_cube, axis=0)
return (mask_cube * data_cube), mask_cube, sigma_orig
def pad_wrapper(array, boundary_size=5):
xshape, yshape = array.shape
continuous_array = np.zeros((xshape - 6*boundary_size, yshape - 6*boundary_size))
reduced_array = array[boundary_size : xshape - boundary_size, boundary_size : yshape - boundary_size]
pass
def gauss_kern(size, ysize=None, zsize=None):
""" Returns a normalized 3D gauss kernel array for convolutions """
size = int(size)
if not ysize:
ysize = size
else:
ysize = int(ysize)
if not zsize:
zsize = size
else:
zsize = int(zsize)
x, y, z = np.mgrid[-size:size+1, -ysize:ysize+1, -zsize:zsize+1]
g = np.exp(-(x**2/float(size)+y**2/float(ysize)+z**2/float(zsize)))
return g / g.sum()
if __name__=='__main__':
pass
# import sys
# fib(int(sys.argv[1]))
# from astropy.io.fits import getdata
# cube, header = getdata("filename",header=True)
# shape = cube.shape
# cube[:,shape[0],:] = cube[:,0,:]
# cube[:,:,shape[1]] = cube[:,:,0]
# data = property_arrays((cube,header), rms_noise=0.001, save_name="filename")
# data.return_all() | [
"[email protected]"
] | |
d917aed4d0dc683d3061f78f5a904422e86e49e2 | 26a660be93842a94c6416491fcf29bbab4a98a66 | /dev_utils/BioLogic_.py | 63ea4d02289cf872e0b1f0f10bd2c512fdf203c7 | [
"MIT"
] | permissive | indigos33k3r/cellpy | b9d2b37c994c41c73e5a3a0a439c787b9857e978 | 7aef2bb416d1506229747320cf73dc199704f585 | refs/heads/master | 2020-04-14T10:37:44.810456 | 2018-11-16T13:39:08 | 2018-11-16T13:39:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,053 | py | # -*- coding: utf-8 -*-
"""Code to read in data files from Bio-Logic instruments"""
# created by Chris Kerr
# downloaded from https://github.com/chatcannon/galvani/blob/master/galvani/BioLogic.py
__all__ = ['MPTfileCSV', 'MPTfile']
import sys
import re
import csv
from os import SEEK_SET
import time
from datetime import date, datetime, timedelta
from collections import OrderedDict
import numpy as np
if sys.version_info.major <= 2:
str3 = str
from string import maketrans
else:
str3 = lambda b: str(b, encoding='ascii')
maketrans = bytes.maketrans
def fieldname_to_dtype(fieldname):
"""Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype"""
if fieldname == 'mode':
return ('mode', np.uint8)
elif fieldname in ("ox/red", "error", "control changes", "Ns changes",
"counter inc."):
return (fieldname, np.bool_)
elif fieldname in ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
"control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
"|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
"Re(Z)/Ohm", "-Im(Z)/Ohm"):
return (fieldname, np.float_)
# N.B. I'm not sure what 'Ns' is as in the only file I have with that
# header it never has any value other than '0'
elif fieldname in ("cycle number", "I Range", "Ns"):
return (fieldname, np.int_)
elif fieldname in ("dq/mA.h", "dQ/mA.h"):
return ("dQ/mA.h", np.float_)
elif fieldname in ("I/mA", "<I>/mA"):
return ("I/mA", np.float_)
elif fieldname in ("Ewe/V", "<Ewe>/V"):
return ("Ewe/V", np.float_)
else:
raise ValueError("Invalid column header: %s" % fieldname)
def comma_converter(float_string):
"""Convert numbers to floats whether the decimal point is '.' or ','"""
trans_table = maketrans(b',', b'.')
return float(float_string.translate(trans_table))
def MPTfile(file_or_path):
"""Opens .mpt files as numpy record arrays
Checks for the correct headings, skips any comments and returns a
numpy record array object and a list of comments
"""
if isinstance(file_or_path, str):
mpt_file = open(file_or_path, 'rb')
else:
mpt_file = file_or_path
magic = next(mpt_file)
if magic != b'EC-Lab ASCII FILE\r\n':
raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
    nb_headers_match = re.match(br'Nb header lines : (\d+)\s*$', next(mpt_file))
nb_headers = int(nb_headers_match.group(1))
if nb_headers < 3:
raise ValueError("Too few header lines: %d" % nb_headers)
## The 'magic number' line, the 'Nb headers' line and the column headers
## make three lines. Every additional line is a comment line.
comments = [next(mpt_file) for i in range(nb_headers - 3)]
fieldnames = str3(next(mpt_file)).strip().split('\t')
record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
## Must be able to parse files where commas are used for decimal points
converter_dict = dict(((i, comma_converter)
for i in range(len(fieldnames))))
mpt_array = np.loadtxt(mpt_file, dtype=record_type,
converters=converter_dict)
return mpt_array, comments
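# Minimal usage sketch (illustrative only; the file name is hypothetical):
#
#     data, comments = MPTfile('experiment_01.mpt')
#     print(data['time/s'][:10], data['Ewe/V'][:10])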
def MPTfileCSV(file_or_path):
"""Simple function to open MPT files as csv.DictReader objects
Checks for the correct headings, skips any comments and returns a
csv.DictReader object and a list of comments
"""
if isinstance(file_or_path, str):
mpt_file = open(file_or_path, 'r')
else:
mpt_file = file_or_path
magic = next(mpt_file)
if magic.rstrip() != 'EC-Lab ASCII FILE':
raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
    nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
nb_headers = int(nb_headers_match.group(1))
if nb_headers < 3:
raise ValueError("Too few header lines: %d" % nb_headers)
## The 'magic number' line, the 'Nb headers' line and the column headers
## make three lines. Every additional line is a comment line.
comments = [next(mpt_file) for i in range(nb_headers - 3)]
mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
expected_fieldnames = (
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h",
"P/W", "<I>/mA", "(Q-Qo)/mA.h", "x"],
['mode', 'ox/red', 'error', 'control changes', 'Ns changes',
'counter inc.', 'time/s', 'control/V', 'Ewe/V', 'dq/mA.h',
'<I>/mA', '(Q-Qo)/mA.h', 'x'],
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V", "Ewe/V", "I/mA",
"dQ/mA.h", "P/W"],
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V", "Ewe/V", "<I>/mA",
"dQ/mA.h", "P/W"])
if mpt_csv.fieldnames not in expected_fieldnames:
raise ValueError("Unrecognised headers for MPT file format")
return mpt_csv, comments
VMPmodule_hdr = np.dtype([('shortname', 'S10'),
('longname', 'S25'),
('length', '<u4'),
('version', '<u4'),
('date', 'S8')])
def VMPdata_dtype_from_colIDs(colIDs):
dtype_dict = OrderedDict()
flags_dict = OrderedDict()
flags2_dict = OrderedDict()
for colID in colIDs:
if colID in (1, 2, 3, 21, 31, 65):
dtype_dict['flags'] = 'u1'
if colID == 1:
flags_dict['mode'] = (np.uint8(0x03), np.uint8)
elif colID == 2:
flags_dict['ox/red'] = (np.uint8(0x04), np.bool_)
elif colID == 3:
flags_dict['error'] = (np.uint8(0x08), np.bool_)
elif colID == 21:
flags_dict['control changes'] = (np.uint8(0x10), np.bool_)
elif colID == 31:
flags_dict['Ns changes'] = (np.uint8(0x20), np.bool_)
elif colID == 65:
flags_dict['counter inc.'] = (np.uint8(0x80), np.bool_)
else:
raise NotImplementedError("flag %d not implemented" % colID)
elif colID in (131,):
dtype_dict['flags2'] = '<u2'
if colID == 131:
flags2_dict['??'] = (np.uint16(0x0001), np.bool_)
elif colID == 4:
dtype_dict['time/s'] = '<f8'
elif colID == 5:
dtype_dict['control/V/mA'] = '<f4'
# 6 is Ewe, 77 is <Ewe>, I don't see the difference
elif colID in (6, 77):
dtype_dict['Ewe/V'] = '<f4'
# Can't see any difference between 7 and 23
elif colID in (7, 23):
dtype_dict['dQ/mA.h'] = '<f8'
# 76 is <I>, 8 is either I or <I> ??
elif colID in (8, 76):
dtype_dict['I/mA'] = '<f4'
elif colID == 11:
dtype_dict['I/mA'] = '<f8'
elif colID == 19:
dtype_dict['control/V'] = '<f4'
elif colID == 24:
dtype_dict['cycle number'] = '<f8'
elif colID == 32:
dtype_dict['freq/Hz'] = '<f4'
elif colID == 33:
dtype_dict['|Ewe|/V'] = '<f4'
elif colID == 34:
dtype_dict['|I|/A'] = '<f4'
elif colID == 35:
dtype_dict['Phase(Z)/deg'] = '<f4'
elif colID == 36:
dtype_dict['|Z|/Ohm'] = '<f4'
elif colID == 37:
dtype_dict['Re(Z)/Ohm'] = '<f4'
elif colID == 38:
dtype_dict['-Im(Z)/Ohm'] = '<f4'
elif colID == 39:
dtype_dict['I Range'] = '<u2'
elif colID == 70:
dtype_dict['P/W'] = '<f4'
elif colID == 434:
dtype_dict['(Q-Qo)/C'] = '<f4'
elif colID == 435:
dtype_dict['dQ/C'] = '<f4'
else:
raise NotImplementedError("column type %d not implemented" % colID)
return np.dtype(list(dtype_dict.items())), flags_dict, flags2_dict
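# Illustrative example of the mapping above (column IDs taken from the branches
# handled in VMPdata_dtype_from_colIDs; output shown approximately):
#
#     dtype, flags, flags2 = VMPdata_dtype_from_colIDs([1, 4, 6, 8])
#     # dtype -> [('flags', 'u1'), ('time/s', '<f8'), ('Ewe/V', '<f4'), ('I/mA', '<f4')]
#     # flags -> {'mode': (3, numpy.uint8)}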
def read_VMP_modules(fileobj, read_module_data=True):
    """Reads in module headers in the VMPmodule_hdr format. Yields a dict with
    the headers and offset for each module.
    N.B. the offset yielded is the offset to the start of the data i.e. after
    the end of the header. The data runs from (offset) to (offset+length)"""
    while True:
        module_magic = fileobj.read(len(b'MODULE'))
        if len(module_magic) == 0:  # end of file
            return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
        elif module_magic != b'MODULE':
            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
        hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
        if len(hdr_bytes) < VMPmodule_hdr.itemsize:
            raise IOError("Unexpected end of file while reading module header")
        hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1)
        hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
        hdr_dict['offset'] = fileobj.tell()
        if read_module_data:
            hdr_dict['data'] = fileobj.read(hdr_dict['length'])
            if len(hdr_dict['data']) != hdr_dict['length']:
                raise IOError("""Unexpected end of file while reading data
                    current module: %s
                    length read: %d
                    length expected: %d""" % (hdr_dict['longname'],
                                              len(hdr_dict['data']),
                                              hdr_dict['length']))
            yield hdr_dict
        else:
            yield hdr_dict
            fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)
class MPRfile:
"""Bio-Logic .mpr file
The file format is not specified anywhere and has therefore been reverse
engineered. Not all the fields are known.
Attributes
==========
modules - A list of dicts containing basic information about the 'modules'
of which the file is composed.
data - numpy record array of type VMPdata_dtype containing the main data
array of the file.
startdate - The date when the experiment started
enddate - The date when the experiment finished
"""
def __init__(self, file_or_path):
if isinstance(file_or_path, str):
mpr_file = open(file_or_path, 'rb')
else:
mpr_file = file_or_path
mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a \x00\x00\x00\x00'
magic = mpr_file.read(len(mpr_magic))
if magic != mpr_magic:
raise ValueError('Invalid magic for .mpr file: %s' % magic)
modules = list(read_VMP_modules(mpr_file))
self.modules = modules
settings_mod, = (m for m in modules if m['shortname'] == b'VMP Set ')
data_module, = (m for m in modules if m['shortname'] == b'VMP data ')
maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG ']
n_data_points = np.fromstring(data_module['data'][:4], dtype='<u4')
n_columns = np.fromstring(data_module['data'][4:5], dtype='u1')
if data_module['version'] == 0:
column_types = np.fromstring(data_module['data'][5:], dtype='u1',
count=n_columns)
remaining_headers = data_module['data'][5 + n_columns:100]
main_data = data_module['data'][100:]
elif data_module['version'] == 2:
column_types = np.fromstring(data_module['data'][5:], dtype='<u2',
count=n_columns)
## There is 405 bytes of data before the main array starts
remaining_headers = data_module['data'][5 + 2 * n_columns:405]
main_data = data_module['data'][405:]
else:
raise ValueError("Unrecognised version for data module: %d" %
data_module['version'])
if sys.version_info.major <= 2:
assert(all((b == '\x00' for b in remaining_headers)))
else:
assert(not any(remaining_headers))
self.dtype, self.flags_dict, self.flags2_dict = VMPdata_dtype_from_colIDs(column_types)
self.data = np.fromstring(main_data, dtype=self.dtype)
assert(self.data.shape[0] == n_data_points)
## No idea what these 'column types' mean or even if they are actually
## column types at all
self.version = int(data_module['version'])
self.cols = column_types
self.npts = n_data_points
tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y')
self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
if maybe_log_module:
log_module, = maybe_log_module
tm = time.strptime(str3(log_module['date']), '%m/%d/%y')
self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
## There is a timestamp at either 465 or 469 bytes
## I can't find any reason why it is one or the other in any
## given file
ole_timestamp1 = np.fromstring(log_module['data'][465:],
dtype='<f8', count=1)
ole_timestamp2 = np.fromstring(log_module['data'][469:],
dtype='<f8', count=1)
ole_timestamp3 = np.fromstring(log_module['data'][473:],
dtype='<f8', count=1)
if ole_timestamp1 > 40000 and ole_timestamp1 < 50000:
ole_timestamp = ole_timestamp1
elif ole_timestamp2 > 40000 and ole_timestamp2 < 50000:
ole_timestamp = ole_timestamp2
elif ole_timestamp3 > 40000 and ole_timestamp3 < 50000:
ole_timestamp = ole_timestamp3
else:
raise ValueError("Could not find timestamp in the LOG module")
ole_base = datetime(1899, 12, 30, tzinfo=None)
ole_timedelta = timedelta(days=ole_timestamp[0])
self.timestamp = ole_base + ole_timedelta
if self.startdate != self.timestamp.date():
raise ValueError("""Date mismatch:
Start date: %s
End date: %s
Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
def get_flag(self, flagname):
if flagname in self.flags_dict:
mask, dtype = self.flags_dict[flagname]
return np.array(self.data['flags'] & mask, dtype=dtype)
elif flagname in self.flags2_dict:
mask, dtype = self.flags2_dict[flagname]
return np.array(self.data['flags2'] & mask, dtype=dtype)
else:
raise AttributeError("Flag '%s' not present" % flagname)
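# Illustrative usage sketch (the path is hypothetical; the 'Ewe/V' column is
# only present for files that actually contain it):
#
#     mpr = MPRfile('some_measurement.mpr')
#     voltage = mpr.data['Ewe/V']
#     mode = mpr.get_flag('mode')      # decoded from the packed 'flags' column
#     print(mpr.startdate, mpr.npts)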
def main(filename):
m = MPRfile(filename)
if __name__ == '__main__':
test_file = "../cellpy/data_ex/biologic/Bec01_01_1_C20_loop_20170219_01_MB_C02.mpr"
main(test_file)
| [
"[email protected]"
] | |
e3ea71ddfb85dc71b555b2e4eae31113519430a1 | 4819a4f99c6e283344bf81d05f98afb6555b4fe9 | /untitled1/urls.py | 8a2caf4fa2350d2a1cf343ecd51d3a00dc4e5e00 | [] | no_license | RafayelGardishyan/Schoolar | e6efe7280ac6c355421e34c3742f685a6c75b988 | 48e21c03486060e4bf6b0dd3e8f529f3faea0e9d | refs/heads/master | 2022-03-09T00:18:46.684387 | 2019-10-31T15:55:59 | 2019-10-31T15:55:59 | 170,994,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | """untitled1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from . import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('learnit.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | [
"[email protected]"
] | |
0a3443fba16aaf8d469463c85e077f82a962c1a3 | e4f1f60c587fadab2af3082836b559f981a74015 | /pcmdpy/galaxy/sfhmodels.py | 633948280418a368fd72cfdc917404aad8e5503b | [
"MIT"
] | permissive | bacook17/pcmdpy | bb2cd4b224f6a7cad5ca638a94f8494945404c6a | ce2e9341efb1846e8c6c8bac27208603591ec525 | refs/heads/master | 2021-06-04T09:49:21.414770 | 2019-08-13T17:39:48 | 2019-08-13T17:39:48 | 113,083,573 | 7 | 2 | MIT | 2023-06-27T04:45:28 | 2017-12-04T19:09:52 | Batchfile | UTF-8 | Python | false | false | 12,206 | py | # sfhmodels.py
# Ben Cook ([email protected])
"""Define SFHModel classes to integrate with Galaxy Models"""
__all__ = ['BaseSFHModel', 'NonParam', 'ConstantSFR', 'TauModel', 'RisingTau',
'SSPModel', 'get_sfh_model', 'all_sfh_models']
import numpy as np
def get_sfh_model(name, *args, **kwargs):
if name.lower() == 'nonparam':
return NonParam(*args, **kwargs)
elif name.lower() == 'constant':
return ConstantSFR(*args, **kwargs)
elif name.lower() == 'tau':
return TauModel(*args, **kwargs)
elif name.lower() == 'risingtau':
return RisingTau(*args, **kwargs)
elif name.lower() == 'ssp':
return SSPModel(*args, **kwargs)
else:
raise NotImplementedError(
"given name {} not an acceptable SFH model. Choose one of:\n"
"{}".format(name.lower(), ['nonparam', 'constant', 'tau',
'risingtau', 'ssp']))
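# Illustrative usage sketch (parameter values are made up):
#
#     model = get_sfh_model('tau', initial_params=[2.0, 3.0])   # logNpix, tau
#     ages, sfh = model.get_vals()
#     total_npix = model.Npix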
class BaseSFHModel:
default_SFH_edges = np.array([6., 8., 9., 9.5, 10., 10.2])
_num_SFH_bins = len(default_SFH_edges) - 1
def __init__(self):
assert hasattr(self, 'iso_edges'), ("iso_edges not set")
assert hasattr(self, 'SFH'), ('SFH not set')
if not hasattr(self, '_params'):
self._params = [None]
@property
def ages(self):
return 0.5*(self.iso_edges[:-1] + self.iso_edges[1:])
@property
def _num_isochrones(self):
return len(self.iso_edges) - 1
@property
def delta_ts(self):
return np.diff(10.**(self.iso_edges - 9.))
@property
def Npix(self):
return np.sum(self.SFH)
@property
def logNpix(self):
return np.log10(self.Npix)
@property
def logSFH(self):
return np.log10(self.SFH)
def get_vals(self):
return self.ages, self.SFH
def get_cum_sfh(self):
"""
Defined such that first age bin has 100% cum SFH
"""
normed_sfh = self.SFH / self.Npix
cum_sfh = 1. - np.cumsum(normed_sfh)
return np.append(1., cum_sfh)
def mass_frac_younger_than(self, age):
return np.sum(self.SFH[self.ages < age]) / self.Npix
def as_NonParam(self):
current_edges = self.iso_edges
self.update_edges(self.default_SFH_edges)
other = NonParam(self.logSFH, iso_step=-1,
sfh_edges=self.default_SFH_edges)
self.update_edges(current_edges)
return other
def as_default(self):
return self.as_NonParam()
def update_edges(self, new_edges):
self.iso_edges = new_edges
@property
def mean_age(self):
return np.average(self.ages, weights=self.SFH)
class NonParam(BaseSFHModel):
_params = np.array([None, None, None, None, None])
def __init__(self, initial_params=None, iso_step=0.2,
sfh_edges=None, iso_edges=None):
self.iso_step = iso_step
if iso_edges is None:
if iso_step > 0:
# construct list of ages, given isochrone spacing
self.iso_edges = np.arange(6.0, 10.3, iso_step)
else:
self.iso_edges = self.default_SFH_edges
else:
self.iso_edges = iso_edges
self.update_sfh_edges(sfh_edges if sfh_edges is not None else self.default_SFH_edges)
assert np.all(np.isclose(self.overlap_matrix.sum(axis=1), 1.0)), (
"The sums over the overlap matrix should all be near 1")
if initial_params is None:
initial_params = np.zeros(self._num_params, dtype=float)
self.set_params(initial_params)
super().__init__()
def copy(self):
return NonParam(initial_params=self._params,
iso_step=self.iso_step,
sfh_edges=self.sfh_edges,
iso_edges=self.iso_edges)
@property
def _deltat_sfh(self):
return np.diff(10.**(self.sfh_edges - 9.))
@property
def _num_params(self):
return len(self.sfh_edges) - 1
@property
def _param_names(self):
return ['logSFH{:d}'.format(i) for i in range(self._num_params)]
@property
def _fancy_names(self):
return [r'$\log\;$' + 'SFH{:d}'.format(i) for i in range(self._num_params)]
@property
def _default_prior_bounds(self):
return [[-3.0, 3.0]] * self._num_params
def set_params(self, sfh_params):
is_valid = (hasattr(sfh_params, '__len__') and
len(sfh_params) == self._num_params)
assert is_valid, ('sfh_params must be an array or list of length '
'{:d}, not {:d}'.format(self._num_params,
len(sfh_params)))
sfh_params = sfh_params.astype(float)
self.SFH = np.dot(10.**sfh_params, self.overlap_matrix)
assert np.isclose(self.Npix, np.sum(10.**sfh_params))
self._params = sfh_params
def from_sfr(self, sfr_params):
sfh_params = np.log10(self._deltat_sfh) + sfr_params
self.set_params(sfh_params)
def update_sfh_edges(self, new_edges):
self.sfh_edges = new_edges
self.overlap_matrix = _build_overlap_matrix(10.**self.sfh_edges,
10.**self.iso_edges)
self.set_params(np.zeros(self._num_params))
def update_edges(self, new_edges):
self.iso_edges = new_edges
self.overlap_matrix = _build_overlap_matrix(10.**self.sfh_edges,
10.**self.iso_edges)
self.set_params(self._params)
def as_NonParam(self):
# transform current SFH into original SFH bins
_new_overlap = _build_overlap_matrix(10.**self.sfh_edges,
10.**self.default_SFH_edges)
sfh_params = np.log10(np.dot(10.**self._params, _new_overlap))
return NonParam(initial_params=sfh_params, iso_step=-1,
sfh_edges=self.default_SFH_edges)
class ConstantSFR(BaseSFHModel):
_param_names = ['logNpix']
_fancy_names = [r'$\log\; \mathrm{N_{pix}}$']
_num_params = len(_param_names)
_default_prior_bounds = [[0., 8.0]]
_params = [None]
def __init__(self, initial_params=None, iso_step=0.2):
"""
"""
self.iso_step = iso_step
if iso_step > 0:
self.iso_edges = np.arange(6.0, 10.3, iso_step)
else:
self.iso_edges = self.default_SFH_edges
if initial_params is None:
initial_params = np.zeros(self._num_params)
self.set_params(initial_params)
super().__init__()
def copy(self):
return ConstantSFR(initial_params=self._params,
iso_step=self.iso_step)
def set_params(self, logNpix):
if hasattr(logNpix, '__len__'):
assert len(logNpix) == self._num_params, ("params for "
"ConstantSFR should be "
"length {:d}, not {:d}".format(self._num_params, len(sfh_params)))
logNpix = logNpix[0]
self._params = np.array([logNpix], dtype=float)
@property
def SFH(self):
Npix = 10.**self._params[0]
return Npix * self.delta_ts / np.sum(self.delta_ts)
class TauModel(BaseSFHModel):
_param_names = ['logNpix', 'tau']
_fancy_names = [r'$\log\; \mathrm{N_{pix}}$', r'$\tau$']
_num_params = len(_param_names)
_default_prior_bounds = [[0., 8.0], [0.1, 20.]]
_params = [None, None]
def __init__(self, initial_params=None, iso_step=0.2):
"""
"""
self.iso_step = iso_step
if iso_step > 0:
self.iso_edges = np.arange(6.0, 10.3, iso_step)
else:
self.iso_edges = self.default_SFH_edges
if initial_params is None:
initial_params = np.array([0., 1.])
self.set_params(initial_params)
super().__init__()
def copy(self):
return TauModel(initial_params=self._params,
iso_step=self.iso_step)
def set_params(self, sfh_params):
is_valid = (hasattr(sfh_params, '__len__') and
len(sfh_params) == self._num_params)
assert is_valid, ('sfh_params must be an array or list of length '
'{:d}, not {:d}'.format(self._num_params,
len(sfh_params)))
self._params = np.array(sfh_params).astype(float)
@property
def SFH(self):
Npix = 10.**self._params[0]
tau = self._params[1]
ages_linear = 10.**(self.iso_edges - 9.) # convert to Gyrs
SFH_term = np.diff(np.exp(ages_linear/tau))
return Npix * SFH_term / np.sum(SFH_term)
class RisingTau(BaseSFHModel):
_param_names = ['logNpix', 'tau_rise']
_fancy_names = [r'$\log\;\mathrm{N_{pix}}$', r'$\tau$']
_num_params = len(_param_names)
_default_prior_bounds = [[0., 8.0], [0.1, 20.]]
_params = [None, None]
def __init__(self, initial_params=None, iso_step=0.2):
"""
"""
self.iso_step = iso_step
if iso_step > 0:
self.iso_edges = np.arange(6.0, 10.3, iso_step)
else:
self.iso_edges = self.default_SFH_edges
if initial_params is None:
initial_params = np.array([0., 1.])
self.set_params(initial_params)
super().__init__()
def copy(self):
return RisingTau(initial_params=self._params,
iso_step=self.iso_step)
def set_params(self, sfh_params):
is_valid = (hasattr(sfh_params, '__len__') and
len(sfh_params) == self._num_params)
assert is_valid, ('sfh_params must be an array or list of length '
'{:d}, not {:d}'.format(self._num_params,
len(sfh_params)))
self._params = np.array(sfh_params).astype(float)
@property
def SFH(self):
Npix = 10.**self._params[0]
tau = self._params[1]
ages_linear = 10.**(self.iso_edges - 9.) # convert to Gyrs
base_term = (ages_linear[-1]+tau-ages_linear) * np.exp(ages_linear/tau)
SFH_term = np.diff(base_term)
return Npix * SFH_term / np.sum(SFH_term)
class SSPModel(BaseSFHModel):
_param_names = ['logNpix', 'logage']
_fancy_names = [r'$\log\;\mathrm{N_{pix}}$', r'$\log$ age (yr)']
_num_params = len(_param_names)
_default_prior_bounds = [[0., 8.0], [8.0, 10.5]]
_params = [None, None]
def __init__(self, initial_params=None, iso_step=None):
"""
"""
if initial_params is None:
initial_params = np.array([0.0, 10.0])
self.iso_step = iso_step
self.set_params(initial_params)
super().__init__()
def copy(self):
return SSPModel(initial_params=self._params,
iso_step=self.iso_step)
def set_params(self, sfh_params):
is_valid = (hasattr(sfh_params, '__len__') and
len(sfh_params) == self._num_params)
assert is_valid, ('sfh_params must be an array or list of length '
'{:d}, not {:d}'.format(self._num_params,
len(sfh_params)))
Npix = 10.**sfh_params[0]
self.SFH = np.array([Npix])
self.iso_edges = np.array([-0.1, 0.1]) + sfh_params[1]
self._params = sfh_params.astype(float)
all_sfh_models = [NonParam, TauModel, RisingTau,
SSPModel, ConstantSFR]
def _overlap(left1, right1, left2, right2):
x = (min(right1, right2) - max(left1, left2)) / (right1 - left1)
return max(0, x)
def _build_overlap_matrix(arr1, arr2):
result = np.zeros((len(arr1)-1, len(arr2)-1))
for i in range(len(arr1)-1):
for j in range(len(arr2)-1):
result[i, j] = _overlap(arr1[i], arr1[i+1],
arr2[j], arr2[j+1])
return result
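# Small worked example (illustrative): two SFH bins split evenly across four
# isochrone bins.
#
#     _build_overlap_matrix([0., 2., 4.], [0., 1., 2., 3., 4.])
#     # -> [[0.5, 0.5, 0. , 0. ],
#     #     [0. , 0. , 0.5, 0.5]]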
| [
"[email protected]"
] | |
b7f51ac07e35b2adf6dab304ed1b86b064e9a447 | 29cc0a662b62078e553c461f05ef999c76c0f51f | /Lab_01/connection.py | 7f6f22939e3081ad8120f3d5f4badfa55ace0957 | [] | no_license | fefeagus/Redes_Sistemas_Distribuidos_2015 | bd2978f439389d8f50cbe55a9681cede2530de26 | eee77359891d6c52083c2bd116c2ae65cf36af14 | refs/heads/master | 2023-04-14T13:46:13.935385 | 2017-09-12T03:37:50 | 2017-09-12T03:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | # encoding: utf-8
# Copyright 2014 Carlos Bederián
# $Id: connection.py 455 2011-05-01 00:32:09Z carlos $
import os
import socket
from constants import *
import server
class Connection(object):
"""
    Point-to-point connection between the server and a client.
    Handles the client's requests until the connection ends.
"""
def __init__(self, socket, directory):
        # Connection initialization
self.sock = socket
self.dir = directory
self.buff_in = ''
self.buff_out = ''
self.connection_active = True
def es_nombre_valido(self, name_file):
"""
        Returns True if the given name contains only valid characters,
        or False otherwise.
"""
nombre = set(name_file) - VALID_CHARS
return nombre == set([])
def send_buffer(self):
"""
        Sends data to be received by the client.
"""
while self.buff_out:
cant_bytes = self.sock.send(self.buff_out)
assert cant_bytes > 0
self.buff_out = self.buff_out[cant_bytes:]
def unknown_command(self):
"""
        Invalid command message.
"""
self.buff_out += str(INVALID_COMMAND)
self.buff_out += space + error_messages[INVALID_COMMAND] + EOL
self.send_buffer()
def wrong_arg_q(self):
"""
        Invalid arguments message.
"""
self.buff_out += str(INVALID_ARGUMENTS)
self.buff_out += space + error_messages[INVALID_ARGUMENTS] + EOL
self.send_buffer()
def file_not_found(self):
"""
        File-not-found message.
"""
self.buff_out += str(FILE_NOT_FOUND)
self.buff_out += space + error_messages[FILE_NOT_FOUND] + EOL
self.send_buffer()
def bad_offset(self):
"""
        Message for a nonexistent position (offset) in a file.
"""
self.buff_out += str(BAD_OFFSET)
self.buff_out += space + error_messages[BAD_OFFSET] + EOL
self.send_buffer()
def bad_eol(self):
"""
        Message indicating that an \r\n character was found outside a
        request-terminating EOL.
"""
self.buff_out += str(BAD_EOL)
self.buff_out += space + error_messages[BAD_EOL] + EOL
self.send_buffer()
def get_file_listing(self):
"""
        Lists the files in a directory.
"""
try:
lista = os.listdir(self.dir)
except:
print('INTERNAL SERVER ERROR')
raise INTERNAL_ERROR
else:
self.buff_out += "0 OK" + EOL
for x in lista:
self.buff_out += x
self.buff_out += EOL
self.buff_out += EOL
self.send_buffer()
def get_metadata(self, name_file):
"""
        Returns the size of the given file (in bytes).
"""
is_valid_name = self.es_nombre_valido(name_file)
file_exist = os.path.isfile(os.path.join(self.dir, name_file))
        if not is_valid_name:  # the file name is not valid
self.wrong_arg_q()
elif not file_exist:
self.file_not_found()
        # Internal server error
else:
try:
data = os.path.getsize(os.path.join(self.dir, name_file))
except:
print('INTERNAL SERVER ERROR')
raise INTERNAL_ERROR
else:
self.buff_out += "0 OK" + EOL + str(data) + EOL
self.send_buffer()
def get_slice(self, avl_file, offset, size):
"""
        Reads and returns the data of the given file from OFFSET up to
        OFFSET + SIZE.
"""
file_exist = os.path.isfile(os.path.join(self.dir, avl_file))
if not file_exist:
self.file_not_found()
else:
try:
offset2 = int(offset)
size2 = int(size)
except ValueError:
self.wrong_arg_q()
else:
size_file = size2
start_read = offset2
len_file = os.path.getsize(os.path.join(self.dir, avl_file))
offset_plus = start_read > len_file
size_plus = (start_read + size_file) > len_file
if offset_plus or size_plus:
self.bad_offset()
else:
try:
file_open = open(os.path.join(self.dir, avl_file), 'r')
except IOError:
                        print("the file could not be opened")
raise INTERNAL_ERROR
file_open.seek(start_read)
self.buff_out += "0 OK" + EOL
remain = size_file
while remain > 0:
last_part = min(remain, SIZE_READ)
bytes_read = file_open.read(last_part)
self.buff_out += str(len(bytes_read))
self.buff_out += space + bytes_read + EOL
remain -= len(bytes_read)
self.send_buffer()
self.buff_out += "0 " + EOL
self.send_buffer()
def quit(self):
"""
        Closes the connection to the client.
"""
self.buff_out += str(CODE_OK) + " Listo!" + EOL
self.send_buffer()
self.sock.close()
self.connection_active = False
def analizar(self, command):
"""
        Checks whether the request is well formed and contains the number
        of arguments required by each method.
"""
c_tmp = command.split(space)
if c_tmp[0] == 'get_file_listing':
if len(c_tmp) == 1:
self.get_file_listing()
else:
self.wrong_arg_q()
elif c_tmp[0] == 'get_metadata':
if len(c_tmp) != 2 or c_tmp[1] == '':
self.wrong_arg_q()
else:
self.get_metadata(c_tmp[1])
elif c_tmp[0] == 'get_slice':
if len(c_tmp) == 4:
self.get_slice(c_tmp[1], c_tmp[2], c_tmp[3])
else:
self.wrong_arg_q()
elif c_tmp[0] == 'quit':
if len(c_tmp) == 1:
self.quit()
else:
self.wrong_arg_q()
else:
self.unknown_command()
def handle(self):
"""
        Handles connection events until the connection ends.
        """
        # Handle receives and sends until disconnection
while self.connection_active:
            # Receive data until an EOL is received
while EOL not in self.buff_in:
rec = self.sock.recv(SIZE_READ)
self.buff_in += rec
            # Split the first request from the rest
request, self.buff_in = self.buff_in.split(EOL, 1)
            # Check that there is no type-100 (BAD_EOL) error
if new_line in request:
self.bad_eol()
            # Parse the first received request
else:
self.analizar(request)
        # Close the socket on disconnection
self.sock.close()
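# Illustrative sketch of how a server loop might drive this class (the
# listening-socket setup is hypothetical and not part of this module):
#
#     client_sock, _addr = listening_socket.accept()
#     connection = Connection(client_sock, directory)
#     connection.handle()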
| [
"[email protected]"
] | |
20413f0c344df7cbdad1bb7338a11aa39fc9861d | 48460db1a6fdc6c09845c86cf5fa257f1a32f08a | /leetcode/medium/0949_Largest_Time_for_Given_Digits.py | 0a85a9dd6fad4107f8c6a0e5a7d7bc8004502a85 | [] | no_license | MichalBrzozowski91/algorithms | 9d0b085621ed94b1aff5473663fbdc686463cd8d | ae57535b574a800c6300eae7d55b21f2432c3baa | refs/heads/master | 2022-12-20T08:00:59.385002 | 2020-09-30T16:32:33 | 2020-09-30T16:32:33 | 290,835,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | class Solution:
def largestTimeFromDigits(self, A: List[int]) -> str:
B = A.copy()
for firstDigitLimit in [2,1]:
A = B.copy()
result = ''
temp = [a for a in A if a in range(firstDigitLimit + 1)]
if not temp:
return ''
dig = max(temp)
result += str(dig)
A.remove(dig)
# Second digit
if dig == 2:
temp = [a for a in A if a in [0,1,2,3]]
else:
temp = A
if not temp:
continue
dig = max(temp)
result += str(dig)
A.remove(dig)
# Third digit
temp = [a for a in A if a in [0,1,2,3,4,5]]
if not temp:
continue
dig = max(temp)
result += ':' + str(dig)
A.remove(dig)
# Fourth digit
dig = A[0]
result += str(dig)
return result
return '' | [
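# Illustrative check (not part of the submitted solution):
#
#     Solution().largestTimeFromDigits([1, 2, 3, 4])   # -> "23:41"
#     Solution().largestTimeFromDigits([5, 5, 5, 5])   # -> ""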
"[email protected]"
] | |
172e43d93c0b543dc370d654dd22753e9dd1cdfd | f7574ee7a679261e758ba461cb5a5a364fdb0ed1 | /MergeSortedArray.py | 25c884f75350c4b5cb98ff52b73b35e165289aaa | [] | no_license | janewjy/Leetcode | 807050548c0f45704f2f0f821a7fef40ffbda0ed | b4dccd3d1c59aa1e92f10ed5c4f7a3e1d08897d8 | refs/heads/master | 2021-01-10T19:20:22.858158 | 2016-02-26T16:03:19 | 2016-02-26T16:03:19 | 40,615,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
i = 0
j = 0
for j in range(n):
while i < m+j and nums1[i] < nums2[j]:
i += 1
nums1.insert(i,nums2[j])
i += 1
nums1[m+j+1:] = nums2[j+1:]
    # insert() slows the code down
def merge2(self, nums1, m, nums2, n):
l1, l2, end = m-1, n-1, m+n-1
while l1 >= 0 and l2 >= 0:
if nums1[l1] > nums2[l2]:
nums1[end] = nums1[l1]
l1 -= 1
else:
nums1[end] = nums2[l2]
l2 -= 1
end -= 1
if l1 < 0:
nums1[:l2+1] = nums2[:l2+1]
# 1-28
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
i,j,cur = m-1,n-1,m+n-1
while i>=0 and j>=0:
if nums1[i] > nums2[j]:
nums1[cur] = nums1[i]
i-=1
else:
nums1[cur] = nums2[j]
j -= 1
cur -= 1
if i < 0:
nums1[:cur+1] = nums2[:j+1]
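# Illustrative check of the in-place merge (not part of the submission):
#
#     nums1 = [1, 2, 3, 0, 0, 0]
#     Solution().merge(nums1, 3, [2, 5, 6], 3)
#     # nums1 -> [1, 2, 2, 3, 5, 6]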
| [
"[email protected]"
] | |
74c5c8c7b320b2dfc6dc3ab53abcf9739fd64eaa | 343bdaddfc66c6316e2cee490e9cedf150e3a5b7 | /0001_0100/0076/0076.py | 851fe3c3ab0a773579c4237f01aaebb9804a5a57 | [] | no_license | dm-alexi/acmp | af7f6b4484b78f5922f3b464406a0ba5dea0d738 | 3fa0016d132adfeab7937b3e8c9687a34642c93a | refs/heads/master | 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | def timeint(s):
return int(s[:2]) * 60 + int(s[3:])
with open("input.txt", "r") as f, open("output.txt", "w") as q:
m = 0
inlist, outlist = [], []
n = int(f.readline())
for i in range(n):
a, b = (timeint(x) for x in f.readline().split())
inlist.append(a)
outlist.append(b)
inlist.sort()
outlist.sort()
i, j, c = 0, 0, 0
while i < n:
if inlist[i] <= outlist[j]:
i += 1
c += 1
if c > m:
m = c
else:
j += 1
c -= 1
q.write(str(m))
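# Illustrative input/output (made-up sample, not the official test data):
#
#   input.txt          output.txt
#   3                  2
#   10:00 12:00
#   11:30 13:00
#   12:30 14:00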
| [
"[email protected]"
] | |
45716f1b021167af4d29c8f8aceb4dcacc7127bd | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client_common/client_request_lib/data_sources/fake.py | 929662c454b82fe9c9e9246b4e9b77d255e999ca | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 33,763 | py | # 2017.08.29 21:52:16 Střední Evropa (letní čas)
# Embedded file name: scripts/client_common/client_request_lib/data_sources/fake.py
"""
Created on Jul 1, 2015
@author: oleg
"""
from functools import wraps, partial
from datetime import datetime, timedelta, time as dt_time
import random
from client_request_lib import exceptions
from client_request_lib.data_sources import base
EXAMPLES = {}
def _doResponse(callback, result, status_code, response_code):
callback(result, status_code, response_code)
def fake_method(example):
def wrapper(func):
@wraps(func)
def wrapped(self, callback, *args, **kwargs):
try:
result = func(self, *args, **kwargs)
response_code = exceptions.ResponseCodes.NO_ERRORS
status_code = 200
except exceptions.BaseRequestError as e:
result = {'description': e.description}
status_code = e.status_code
response_code = e.response_code
except:
raise
_doResponse(callback, result, status_code, response_code)
name = func.__name__
if 'get_' in name:
name = name.split('get_', 1)[-1]
EXAMPLES[name] = example
return wrapped
return wrapper
def paginated_method(func):
@wraps(func)
def wrapped(*args, **kwargs):
offset = kwargs.pop('offset') or 0
limit = kwargs.pop('limit') or 18
diapasone = slice(offset, offset + limit)
get_total_count = kwargs.pop('get_total_count', False)
result = func(*args, **kwargs)
total = len(result)
result = {'items': result[diapasone]}
if get_total_count:
result['total'] = total
return result
return wrapped
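# How the two decorators above combine (illustrative): `paginated_method`
# slices the example list according to offset/limit and optionally adds a
# 'total' count, and `fake_method` then wraps the call so the result is
# delivered through the callback together with a status code and response
# code, e.g.
#
#     @fake_method(example=[...])
#     @paginated_method
#     def get_clan_invites(self, clan_id, fields=None, statuses=None):
#         ...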
class FakeDataAccessor(base.BaseDataAccessor):
"""
obtain fake data
`FakeDataAccessor` should be used for test purposes when one want emulate
expected backend response
:Example:
>>> fake_accessor = FakeDataAccessor()
>>> requester = Requester(fake_accessor)
>>> requester.login(str, 12312, 'sdfee23e2')
>>> def printer (*args, **kwargs):
pprint(args)
...
>>> requester.clans.get_account_applications_count_since(printer, 123)
(
{'total': 17},
200,
0
)
Use `requests_before_logout` to emulate session expiration.
Session will be considered as expired when `requests_before_logout` is made
use -1 for endless session (default behavior)
:Example:
>>> fake_accessor = FakeDataAccessor()
>>> fake_accessor.requests_before_logout = 2
>>> requester = Requester(fake_accessor)
>>> requester.login(str, 12312, 'sdfee23e2')
>>> def printer (*args, **kwargs):
print (args)
...
>>> requester.clans.get_account_applications_count_since(printer, 123)
({'total': 17}, 200, 0)
>>> requester.clans.get_account_applications_count_since(printer, 123)
({'total': 17}, 200, 0)
>>> requester.clans.get_account_applications_count_since(printer, 123)
('User is not authentificated', 403, 2)
To set expected result for method use `set_data` method
:Example:
>>> fake_accessor = FakeDataAccessor()
>>> requester = Requester(fake_accessor)
>>> requester.login(str, 12312, 'sdfee23e2')
>>> def printer (*args, **kwargs):
print (args)
>>> fake_accessor.set_data('account_applications_count_since', 14, {'total': 11})
>>> requester.clans.get_account_applications_count_since(printer, 14)
({'total': 11}, 200, 0)
>>> requester.clans.get_account_applications_count_since(printer, 123)
({'total': 17}, 200, 0)
To emulate error in response set data to error instance
:Example:
>>> fake_accessor = FakeDataAccessor()
>>> requester = Requester(fake_accessor)
>>> requester.login(str, 12312, 'sdfee23e2')
>>> def printer (*args, **kwargs):
print (args)
>>> fake_accessor.set_data('account_applications_count_since', 14, exceptions.PermissionDenied())
>>> requester.clans.get_account_applications_count_since(printer, 14)
('Forbidden', 403, 3)
>>> requester.clans.get_account_applications_count_since(printer, 123)
({'total': 17}, 200, 0)
"""
requests_before_logout = -1
def __init__(self, url_fetcher = None, config = None, client_lang = None, user_agent = None):
super(FakeDataAccessor, self).__init__()
self.client_lang = client_lang
self._account = None
self._storage = {}
self.account = None
self.user_agent = user_agent
return
def login(self, callback, account_id, spa_token):
self.account = account_id
self._account = self.requests_before_logout
result, status_code = ('ok', 200)
response_code = exceptions.ResponseCodes.NO_ERRORS
_doResponse(callback, result, status_code, response_code)
def get_alive_status(self, callback):
result, status_code = {'status': 'I am alive!'}, 200
response_code = exceptions.ResponseCodes.NO_ERRORS
_doResponse(callback, result, status_code, response_code)
def logout(self, callback):
self.account = None
self._account = None
result, status_code = ('ok', 200)
response_code = exceptions.ResponseCodes.NO_ERRORS
_doResponse(callback, result, status_code, response_code)
return
def _filter_data(self, data, fields):
if isinstance(data, list):
return [ self._filter_data(i, fields) for i in data ]
return {k:v for k, v in data.iteritems() if k in fields}
def _request_data(self, section, entity_id, fields = None):
if not self._account:
raise exceptions.AuthentificationError()
self._account -= 1
try:
result = self._storage[section][entity_id]
except KeyError:
result = EXAMPLES[section]
if callable(result):
result = result(entity_id)
self._storage.setdefault(section, {})[entity_id] = result
if isinstance(result, exceptions.BaseRequestError):
raise result
if fields:
result = self._filter_data(result, fields)
return result
def _compare_keys(self, example, data):
if isinstance(example, list):
for i in data:
self._compare_keys(example[0], i)
if isinstance(example, dict):
if set(example) ^ set(data):
missed = set(example) - set(data)
extra = set(data) - set(example)
message = []
if missed:
message.append('(%s) keys are missed' % ', '.join(missed))
if extra:
message.append('(%s) keys are not needed' % ', '.join(extra))
raise ValueError(' and '.join(message))
def set_data(self, section, entity_id, data):
"""
set fake data for different sections, compare keys while setting
possible sections are following:
- account_applications_count_since
- account_invites
- accounts_clans
- accounts_info
- accounts_names
- clan_applications
- clan_globalmap_stats
- clan_invites_count_since
- clan_invites
- clan_members
- clan_provinces
- clans_info
- clans_ratings
- fronts_info
- search_clans
- stronghold_info
- strongholds_state
- strongholds_statistics
"""
if not section in EXAMPLES:
raise AssertionError
example = EXAMPLES[section]
isinstance(data, exceptions.BaseRequestError) or self._compare_keys(example, data)
self._storage.setdefault(section, {})[entity_id] = data
@fake_method(example=lambda clan_id: {'clan_id': clan_id,
'xp_avg': random.randrange(1, 1000) / 10.0,
'efficiency': random.randrange(1, 10000),
'battles_count_avg': random.randrange(1, 10000),
'wins_ratio_avg': random.randrange(1, 100),
'gm_elo_rating_6': random.randrange(1, 1000),
'gm_elo_rating_8': random.randrange(1, 1000),
'gm_elo_rating_10': random.randrange(1, 1000),
'gm_elo_rating_6_rank': random.randrange(1, 1000),
'gm_elo_rating_8_rank': random.randrange(1, 1000),
'gm_elo_rating_10_rank': random.randrange(1, 1000),
'fb_elo_rating_8': random.randrange(1, 1000),
'fb_elo_rating_10': random.randrange(1, 1000),
'fb_battles_count_10_28d': random.randrange(1, 100),
'fs_battles_count_10_28d': random.randrange(1, 100),
'gm_battles_count_28d': random.randrange(1, 100),
'fs_battles_count_28d': random.randrange(1, 100),
'fb_battles_count_28d': random.randrange(1, 100)})
def get_clans_ratings(self, clan_ids, fields = None):
"""
return fake data from `clans_ratings` section
"""
return [ self._request_data('clans_ratings', i, fields=fields) for i in clan_ids ]
@fake_method(example=lambda clan_id: {'name': 'xxx',
'tag': 'ff',
'motto': 'yyyy',
'leader_id': 666,
'members_count': 13,
'clan_id': clan_id,
'created_at': datetime.now(),
'accepts_join_requests': True,
'treasury': 2423})
def get_clans_info(self, clan_ids, fields = None):
"""
return fake data from `clans_info` section
"""
return [ self._request_data('clans_info', clan_id, fields=fields) for clan_id in clan_ids ]
@fake_method(example=lambda acc_id: {'id': acc_id,
'name': 'name'})
def get_accounts_names(self, account_ids, fields = None):
"""
return fake data from `accounts_names` section
"""
return [ self._request_data('accounts_names', account_id, fields=fields) for account_id in account_ids ]
@fake_method(example=lambda clan_id: [ {'account_id': 2324 + i,
'role_name': 'officer',
'role_bw_flag': 1 << i,
'clan_id': clan_id,
'joined_at': datetime.now()} for i in range(11) ])
def get_clan_members(self, clan_id, fields = None):
"""
return fake data from `clan_members` section
"""
return self._request_data('clan_members', clan_id, fields=fields)
@fake_method(example={'clan_id': 2790,
'favorite_arena_6': 1,
'favorite_arena_8': 3,
'favorite_arena_10': 65549,
'favorite_primetime': dt_time(19, 0)})
def get_clan_favorite_attributes(self, clan_id, fields = None):
"""
return fake data from `clan_favorite_attributes` section
"""
return self._request_data('clan_favorite_attributes', clan_id, fields=fields)
@fake_method(example={'total': 17})
def get_account_applications_count_since(self, account_id, since = None):
"""
return fake data from `account_applications_count_since` section
"""
return self._request_data('account_applications_count_since', account_id)
@fake_method(example={'total': 14})
def get_clan_invites_count_since(self, clan_id, since = None):
"""
return fake data from `clan_invites_count_since` section
"""
return self._request_data('clan_invites_count_since', clan_id)
@fake_method(example={'account_id': 234,
'joined_at': datetime.now(),
'clan_id': 343,
'role_bw_flag': 13,
'role_name': 'commander',
'in_clan_cooldown_till': datetime.now(),
'clan_tag': 'fake',
'clan_color': 123})
def get_accounts_clans(self, account_ids, fields):
"""
return fake data from `accounts_clans` section
"""
return [ self._request_data('accounts_clans', i, fields=fields) for i in account_ids ]
@fake_method(example=lambda (account_id, statuses): [ {'status': random.choice(statuses or ('active', 'declined', 'cancelled', 'accepted', 'expired', 'error', 'deleted')),
'created_at': datetime.now(),
'updated_at': datetime.now(),
'sender_id': random.randrange(1, 10000),
'id': random.randrange(1, 1000000),
'account_id': account_id,
'clan_id': random.randrange(1, 10000),
'status_changer_id': random.randrange(1, 10000),
'comment': 'Welcome {}!'.format(random.randrange(1, 10000)) if random.choice((1, 0)) else ''} for i in range(random.randrange(0, 1000)) ])
@paginated_method
def get_account_applications(self, fields = None, statuses = None):
"""
return fake data from `account_applications` section
"""
return self._request_data('account_applications', (self.account, tuple(statuses or [])), fields=fields)
@fake_method(example=lambda (clan_id, statuses): [ {'status': random.choice(statuses or ('active', 'declined', 'cancelled', 'accepted', 'expired', 'error', 'deleted')),
'created_at': datetime.now(),
'updated_at': datetime.now(),
'sender_id': random.randrange(1, 10000),
'id': random.randrange(1, 1000000),
'account_id': random.randrange(1, 10000),
'clan_id': clan_id,
'status_changer_id': random.randrange(1, 10000),
'comment': 'Welcome {}!'.format(random.randrange(1, 10000)) if random.choice((1, 0)) else ''} for i in range(random.randrange(0, 1000)) ])
@paginated_method
def get_clan_applications(self, clan_id, fields = None, statuses = None):
"""
return fake data from `clan_applications` section
"""
return self._request_data('clan_applications', (clan_id, tuple(statuses or [])), fields=fields)
@fake_method(example=lambda search: ([] if len(search) % 2 else [ {'name': 'Clan Name %d' % random.randrange(1, 1000),
'tag': 'TCLAN',
'motto': 'Clan Motto',
'leader_id': random.randrange(1, 10000),
'clan_id': random.randrange(1, 100),
'members_count': random.randrange(1, 50),
'created_at': datetime.now(),
'accepts_join_requests': random.choice((True, False))} for i in range(random.randrange(1, 36)) ]))
@paginated_method
def search_clans(self, search, fields = None):
"""
return fake data from `clans_info` section
"""
return self._request_data('search_clans', search)
@fake_method(example=lambda account: [ {'name': 'Clan Name %d' % random.randrange(1, 1000),
'tag': 'TCLAN',
'motto': 'Clan Motto',
'leader_id': random.randrange(1, 10000),
'clan_id': random.randrange(1, 100),
'members_count': random.randrange(1, 50),
'created_at': datetime.now(),
'accepts_join_requests': random.choice((True, False))} for i in range(random.randrange(1, 36)) ])
@paginated_method
def get_recommended_clans(self, fields = None):
"""
return fake data from `clans_info` section
"""
return self._request_data('recommended_clans', self.account)
@fake_method(example=lambda (clan_id, statuses): [ {'status': random.choice(statuses or ('active', 'declined', 'cancelled', 'accepted', 'expired', 'error', 'deleted')),
'created_at': datetime.now(),
'updated_at': datetime.now(),
'sender_id': random.randrange(1, 10000),
'id': random.randrange(1, 1000000),
'account_id': random.randrange(1, 10000),
'clan_id': clan_id,
'comment': 'Welcome {}!'.format(random.randrange(1, 10000)) if random.choice((1, 0)) else '',
'status_changer_id': 2132} for i in range(random.randrange(0, 1000)) ])
@paginated_method
def get_clan_invites(self, clan_id, fields = None, statuses = None):
"""
return fake data from `clan_invites` section
"""
return self._request_data('clan_invites', (clan_id, tuple(statuses or [])), fields=fields)
@fake_method(example=lambda (account_id, statuses): [ {'status': random.choice(statuses or ('active', 'declined', 'cancelled', 'accepted', 'expired', 'error', 'deleted')),
'created_at': datetime.now(),
'updated_at': datetime.now(),
'sender_id': random.randrange(1, 10000),
'id': random.randrange(1, 1000000),
'account_id': account_id,
'clan_id': random.randrange(1, 10000),
'status_changer_id': 2132,
'comment': 'Welcome {}!'.format(random.randrange(1, 10000)) if random.choice((1, 0)) else ''} for i in range(random.randrange(0, 1000)) ])
@paginated_method
def get_account_invites(self, fields = None, statuses = None):
"""
return fake data from `account_invites` section
"""
return self._request_data('account_invites', (self.account, tuple(statuses or [])), fields=fields)
@fake_method(example=lambda account_id: {'global_rating': random.randrange(100, 10000),
'battle_avg_xp': random.randrange(100, 10000),
'battles_count': random.randrange(1, 1000),
'battle_avg_performance': random.uniform(0, 1),
'xp_amount': random.randrange(100, 1000),
'account_id': account_id})
def get_accounts_info(self, account_ids, fields = None):
"""
return fake data from `accounts_info` section
"""
return [ self._request_data('accounts_info', acc_id, fields=fields) for acc_id in account_ids ]
@fake_method(example=[{'front_name': 'some_front',
'province_id': 'some_province',
'front_name_localized': 'some_front_localized',
'province_id_localized': 'some_province_localized',
'revenue': 324,
'hq_connected': True,
'prime_time': dt_time(18, 0, 0),
'periphery': 333,
'game_map': 'some_map',
'pillage_cooldown': 1,
'pillage_end_datetime': datetime.now() + timedelta(hours=3),
'turns_owned': 12}, {'front_name': 'some_front2',
'province_id': 'some_province2',
'front_name_localized': 'some_front_localized2',
'province_id_localized': 'some_province_localized2',
'revenue': 333,
'hq_connected': True,
'prime_time': dt_time(19, 0, 0),
'periphery': 444,
'game_map': 'some_map2',
'pillage_cooldown': None,
'pillage_end_datetime': None,
'turns_owned': 12,
'arena_id': 5}])
def get_clan_provinces(self, clan_id, fields = None):
"""
return fake data from `clan_provinces` section
"""
return self._request_data('clan_provinces', clan_id, fields=fields)
@fake_method(example={'battles_lost': 12,
'influence_points': 121,
'provinces_captured': 23,
'provinces_count': 234,
'battles_played': 332,
'battles_won': 232,
'battles_played_on_6_level': 21,
'battles_won_on_6_level': 12,
'battles_played_on_8_level': 32,
'battles_won_on_8_level': 21,
'battles_played_on_10_level': 43,
'battles_won_on_10_level': 23})
def get_clan_globalmap_stats(self, clan_id, fields = None):
"""
return fake data from `clan_globalmap_stats` section
"""
return self._request_data('clan_globalmap_stats', clan_id, fields=fields)
@fake_method(example=[{'front_name': 'front_name',
'front_name_localized': 'front_name_localized',
'min_vehicle_level': 2,
'max_vehicle_level': 4}])
def get_fronts_info(self, front_names = None, fields = None):
"""
return fake data from `fronts_info` section
"""
return self._request_data('fronts_info', front_names, fields=fields)
@fake_method(example={'defence_mode_is_activated': True,
'defence_hour': dt_time(10, 0),
'sortie_battles_count': 23,
'sortie_wins': 12,
'sortie_losses': 19,
'sortie_fort_resource_in_absolute': 100,
'sortie_fort_resource_in_champion': 71,
'sortie_fort_resource_in_middle': 60,
'defence_battles_count': 234,
'defence_combat_wins': 21,
'sortie_middle_battles_count': 12,
'sortie_champion_battles_count': 32,
'sortie_absolute_battles_count': 23,
'defence_enemy_base_capture_count': 43,
'defence_capture_enemy_building_total_count': 55,
'defence_loss_own_building_total_count': 65,
'defence_attack_efficiency': 23.2,
'defence_success_attack_count': 122,
'defence_attack_count': 13,
'defence_defence_efficiency': 32.2,
'defence_defence_count': 24,
'defence_success_defence_count': 5,
'total_resource_amount': 321,
'defence_resource_loss_count': 112,
'defence_resource_capture_count': 322,
'fb_battles_count_8': 23,
'fb_battles_count_10': 12,
'level': 2,
'buildings': [{'type': 1,
'direction': 0,
'level': 2,
'position': 2}, {'type': 2,
'direction': 1,
'level': 3,
'position': 2}]})
def get_stronghold_info(self, clan_id, fields = None):
"""
return fake data from `stronghold_info` section
"""
return self._request_data('stronghold_info', clan_id, fields=fields)
@fake_method(example={'buildings_count': 4,
'directions_count': 3,
'buildings': [{'type': 1,
'hp': 32,
'storage': 123,
'level': 4,
'position': 7,
'direction': 1}],
'directions': [1, 2],
'off_day': 3,
'vacation_start': datetime.utcnow() + timedelta(days=1),
'vacation_finish': datetime.utcnow() + timedelta(days=4),
'periphery_id': 333,
'clan_tag': 'tag',
'clan_name': 'some_name',
'clan_id': 21,
'level': 2,
'sortie_wins_period': 7,
'sortie_battles_wins_percentage_period': 20.0,
'sortie_battles_count_period': 122,
'defence_battles_count_period': 21})
def get_strongholds_statistics(self, clan_id, fields = None):
"""
return fake data from `strongholds_statistics` section
"""
return self._request_data('strongholds_statistics', clan_id, fields=fields)
@fake_method(example={'clan_id': 234,
'defence_hour': dt_time(10, 0)})
def get_strongholds_state(self, clan_id, fields = None):
"""
return fake data from `strongholds_state` section
"""
return self._request_data('strongholds_state', clan_id, fields=fields)
@fake_method(example=[{'clan_id': 234,
'account_id': 3,
'id': 23}])
def create_invites(self, clan_id, account_ids, comment, fields = None):
"""
return fake data from `create_invites` section
"""
return self._request_data('create_invites', (clan_id, account_ids), fields=fields)
@fake_method(example=[{'clan_id': 224,
'account_id': 3,
'id': 123}])
def create_applications(self, clan_ids, comment, fields = None):
"""
return fake data from `create_applications` section
"""
return self._request_data('create_applications', clan_ids, fields=fields)
@fake_method(example=lambda obj_id: {'transaction_id': 213,
'id': obj_id,
'account_id': 343,
'clan_id': 17})
def accept_application(self, application_id, fields = None):
"""
return fake data from `accept_application` section
"""
return self._request_data('accept_application', application_id, fields=fields)
@fake_method(example=lambda obj_id: {'id': obj_id,
'account_id': 343,
'clan_id': 17})
def decline_application(self, application_id, fields = None):
"""
return fake data from `decline_application` section
"""
return self._request_data('decline_application', application_id, fields=fields)
@fake_method(example=lambda obj_id: {'transaction_id': 213,
'id': obj_id,
'account_id': 343,
'clan_id': 17})
def accept_invite(self, invite_id, fields = None):
"""
return fake data from `accept_invite` section
"""
return self._request_data('accept_invite', invite_id, fields=fields)
@fake_method(example=lambda obj_id: {'id': obj_id,
'account_id': 343,
'clan_id': 17})
def decline_invite(self, invite_id, fields = None):
"""
return fake data from `decline_invite` section
"""
return self._request_data('decline_invite', invite_id, fields=fields)
@fake_method(example=[{'id': 991,
'account_id': 1001,
'clan_id': 19}, {'id': 992,
'account_id': 1001,
'clan_id': 19}, {'id': 993,
'account_id': 1001,
'clan_id': 19}])
def bulk_decline_invites(self, invite_ids):
"""
return fake data from `bulk_decline_invites` section
"""
return self._request_data('bulk_decline_invites', invite_ids)
@fake_method(example={'permissions': {'manage_reserves': ['commander',
'combat_officer',
'executive_officer',
'personnel_officer']},
'time_to_ready': 900,
'max_level': 10,
'battle_series_duration': 3600,
'enemy_clan': None,
'industrial_resource_multiplier': 1,
'max_players_count': 15,
'type': 'FORT_BATTLE',
'max_legionaries_count': 0,
'available_reserves': {'ARTILLERY_STRIKE': [],
'HIGH_CAPACITY_TRANSPORT': [],
'REQUISITION': [],
'AIRSTRIKE': []},
'direction': 'A',
'min_players_count': 1,
'matchmaker_next_tick': 1475578800,
'battle_series_status': [{'battle_reward': 0,
'gameplay_id': 0,
'geometry_id': 6,
'first_resp_clan_id': None,
'second_resp_clan_id': None,
'attacker': None,
'clan_owner_id': 14000012972L,
'current_battle': False,
'map_id': 6}, {'battle_reward': 0,
'gameplay_id': 0,
'geometry_id': 14,
'first_resp_clan_id': None,
'second_resp_clan_id': None,
'attacker': None,
'clan_owner_id': 14000012972L,
'current_battle': False,
'map_id': 14}, {'battle_reward': 0,
'gameplay_id': 0,
'geometry_id': 20,
'first_resp_clan_id': None,
'second_resp_clan_id': None,
'attacker': None,
'clan_owner_id': 14000012972L,
'current_battle': False,
'map_id': 20}],
'battle_duration': 600,
'requisition_bonus_percent': None,
'public': False,
'selected_reserves': [None, None, None],
'min_level': 1})
def get_wgsh_unit_info(self, periphery_id, unit_id, fields = None):
"""
return fake data from `wgsh_unit_info` section
"""
return self._request_data('wgsh_unit_info', unit_id)
@fake_method(example={})
def set_vehicle(self, periphery_id, unit_id, vehicle_cd, fields = None):
"""
return fake data from `set_vehicle` section
"""
return self._request_data('set_vehicle', unit_id)
@fake_method(example={})
def set_readiness(self, periphery_id, unit_id, is_ready, reset_vehicle, fields = None):
"""
return fake data from `set_readiness` section
"""
return self._request_data('set_readiness', unit_id)
@fake_method(example={})
def invite_players(self, periphery_id, unit_id, accounts_to_invite, comment, fields = None):
"""
return fake data from `invite_players` section
"""
return self._request_data('invite_players', unit_id)
@fake_method(example={})
def assign_player(self, periphery_id, unit_id, account_to_assign, fields = None):
"""
return fake data from `assign_player` section
"""
return self._request_data('assign_player', unit_id)
@fake_method(example={})
def unassign_player(self, periphery_id, unit_id, account_to_assign, fields = None):
"""
return fake data from `unassign_player` section
"""
return self._request_data('unassign_player', unit_id)
@fake_method(example={})
def give_leadership(self, periphery_id, unit_id, account_to_assign, fields = None):
"""
return fake data from `give_leadership` section
"""
return self._request_data('give_leadership', unit_id)
@fake_method(example={})
def leave_room(self, periphery_id, unit_id, fields = None):
"""
return fake data from `leave_room` section
"""
return self._request_data('leave_room', unit_id)
@fake_method(example={})
def take_away_leadership(self, periphery_id, unit_id, fields = None):
"""
return fake data from `take_away_leadership` section
"""
return self._request_data('take_away_leadership', unit_id)
@fake_method(example={})
def kick_player(self, periphery_id, unit_id, account_to_assign, fields = None):
"""
return fake data from `kick_player` section
"""
return self._request_data('kick_player', unit_id)
@fake_method(example={})
def set_open(self, periphery_id, unit_id, is_open, fields = None):
"""
return fake data from `set_open` section
"""
return self._request_data('set_open', unit_id)
@fake_method(example={})
def lock_reserve(self, periphery_id, unit_id, reserve_id, fields = None):
"""
return fake data from `lock_reserve` section
"""
return self._request_data('lock_reserve', unit_id)
@fake_method(example={})
def unlock_reserve(self, periphery_id, unit_id, reserve_id, fields = None):
"""
return fake data from `unlock_reserve` section
"""
return self._request_data('unlock_reserve', unit_id)
@fake_method(example=lambda clan_id: {'skirmishes_statistics': {'last_28_days_battles_count': 1,
'last_28_days_wins_count': 1,
'wins_count': 1,
'loses_count': 1,
'draws_count': 1},
'battles_statistics': {'last_28_days_battles_count': 1,
'last_28_days_wins_count': 1,
'wins_count': 1,
'loses_count': 1,
'draws_count': 1},
'skirmishes_count_last_28_days': 1,
'battles_count_last_28_days': 1,
'clear_wins_count': 1,
'level_6_statistics': {'wins_count': 1,
'battles_count': 1},
'level_8_statistics': {'wins_count': 1,
'battles_count': 1},
'level_10_statistics': {'wins_count': 1,
'battles_count': 1}})
def clan_statistics(self, clan_id, fields = None):
"""
return fake data from `clan_statistics` section
"""
return self._request_data('clan_statistics', clan_id)
@fake_method(example=lambda account_id: {'skirmishes_statistics': {'wins_count': 1,
'loses_count': 1,
'draws_count': 1},
'battles_statistics': {'wins_count': 1,
'loses_count': 1,
'draws_count': 1},
'industrial_resource_total': {'random_battles': 1,
'skirmishes': 1,
'battles': 1},
'industrial_resource_last_28_days': {'random_battles': 1,
'skirmishes': 1,
'battles': 1}})
def account_statistics(self, account_id, fields = None):
"""
return fake data from `account_statistics` section
"""
return self._request_data('account_statistics', account_id)
@fake_method(example={})
def join_room(self, periphery_id, unit_id, fields = None):
"""
return fake data from `join_room` section
"""
return self._request_data('join_room', unit_id)
@fake_method(example={'results': {'season': {'avg_exp': 6113244,
'total_battles': 2,
'battles_with_steps': 1,
'points': 91,
'avg_assist_damage': 2,
'avg_damage': 348}},
'meta': {'spa': {'id': 519}}})
def user_season_statistics(self, fields = None):
"""
return fake data from `user_season_statistics` section
"""
return self._request_data('user_season_statistics', None)
@fake_method(example={'meta': {'total': 224},
'results': {'spa_id': 502,
'position': 1}})
def user_ranked_position(self, fields = None):
"""
return fake data from `user_ranked_position` section
"""
return self._request_data('user_ranked_position', None)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client_common\client_request_lib\data_sources\fake.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:17 Central Europe (daylight saving time)
| [
"[email protected]"
] | |
24a2b2bd01037bb5984627af29d73e874afe85da | 94ea21700381f12b72649a59d2c90ae32c7e04f0 | /addons/hc_medication_administration/models/hc_res_medication_administration.py | 43a24cc10010e8dd270762918187fcb536cf5171 | [] | no_license | messakali/odoo-fhir | c07e2d058763580de2929d4c84ebd4717ac15c43 | 1f5c28a3fdd788370696a5f75ab68a2acfe16d25 | refs/heads/master | 2021-01-10T22:35:55.158494 | 2016-09-28T17:21:56 | 2016-09-28T17:21:56 | 69,700,012 | 0 | 1 | null | 2016-09-30T20:30:57 | 2016-09-30T20:30:56 | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api
# class hc_medication_administration(models.Model):
# _name = 'hc_medication_administration.hc_medication_administration'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100 | [
"[email protected]"
] | |
b9d463520440cefe1071f439c6ea17ab48c3146f | 9d6f8b37165f51cf1f07f527ee1b86cc507127af | /api_pytest_2020/api_2020_04_25_1314/a4.py | 73cef69a16ec6718fad574b3c43bad2c3e284c94 | [] | no_license | pangchuan99/web_auto | 61876df2e69a642a983aa5309a8423429d0105bf | 62d79675eb37a7163a0d1baef97a37e36058fa6d | refs/heads/master | 2022-12-02T18:05:44.996353 | 2020-08-19T07:09:44 | 2020-08-19T07:09:44 | 280,367,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | #作用域
# b = "200"
def function(b):
a = "111"
c = a+b
return c
b = "200"
print(function(b))
| [
"[email protected]"
] | |
f3a1b7d5b8f3c6718af758c89fae01723081f4ca | ca0757ab59d6420efae766dae80a539a3b692fbd | /apps/ippcdrupal/auth_backends.py | ba28716f1c21d575cec5c18d2e1d8708a507320f | [] | no_license | hypertexthero/itwishlist | bc1cfe7f3542a395ab439ee5aa71c1991baaadff | 148a085238ae86ee07255f94d3a48a92190ce5c5 | refs/heads/master | 2020-06-05T01:00:41.981168 | 2013-08-30T15:06:52 | 2013-08-30T15:06:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,052 | py | # =todo: authenticate against drupal users db
# Looks like we'll need to upgrade to Django 1.4...
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.models import User, check_password
from itwishlist.apps.ippcdrupal.models import DrupalUsers
# from itwishlist.apps.ippcdrupal.hashers import is_password_usable, get_hasher
# =todo: upgrade to django 1.4
# from django.contrib.auth.hashers import is_password_usable, get_hasher
from django.utils.encoding import smart_str
# http://stackoverflow.com/questions/16482531/django-registration-custom-backend
# class DrupalUserAuthBackend(object):
# """
# Authenticates against django.contrib.auth.models.User. with my modifications
# """
# supports_inactive_user = True
#
# """
# This function does not upgrade the user password hasher
# """
# def check_password(self, password, encoded):
# if not password or not is_password_usable(encoded):
# # is_password_usable is only available in Django 1.4
# # https://docs.djangoproject.com/en/1.4/topics/auth/#django.contrib.auth.hashers.is_password_usable
# # if not password:
# return False
#
# password = smart_str(password)
# encoded = smart_str(encoded)
#
# if encoded[0] == "$":
# encoded = encoded[1:] # make it compatible so that drupal 7 sha512 hasher can work properly
#
# if len(encoded) == 32 and '$' not in encoded:
# hasher = get_hasher('unsalted_md5')
# else:
# algorithm = encoded.split('$', 1)[0]
# hasher = get_hasher(algorithm)
#
# is_correct = hasher.verify(password, encoded)
#
# return is_correct
#
# def authenticate(self, username=None, password=None, db=None, **kwargs):
# try:
# user = DrupalUsers.objects.using(db).get(name=username) # name in ippcdrupal.models.DrupalUsers
# if self.check_password(password, user.pass_field):
# return user
# except DrupalUsers.DoesNotExist:
# return None
# # http://query7.com/django-authentication-backends
# http://djangosnippets.org/snippets/2729/
# from account.models import Account
# from itwishlist.apps.ippcdrupal.drupalhasher.DrupalPasswordHasher import verify
# from django.contrib.auth.models import User
#
# class DrupalUserAuthBackend(object):
#
# def authenticate(self, username, password):
#
# try:
# account = DrupalUsers.objects.using('drupaldb').get(username=username, sha_pass_hash=verify(username, password))
#
# try:
# user = User.objects.get(username=username)
#
# except User.DoesNotExist:
#
# user = User(username=account.username)
# user.is_staff = False
# user.is_superuser = False
# user.set_unusable_password()
# user.save()
#
# return user
#
# except Account.DoesNotExist:
#
# return None
#
# def get_user(self, id):
# try:
# return User.objects.get(id=id)
# except User.DoesNotExist:
# return None
class DrupalUserAuthBackend:
"""
Authenticate against the settings ADMIN_LOGIN and ADMIN_PASSWORD.
Use the login name, and a hash of the password. For example:
ADMIN_LOGIN = 'admin'
ADMIN_PASSWORD = 'sha1$4e987$afbcf42e21bd417fb71db8c66b321e9fc33051de'
"""
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None):
# login_valid = (settings.ADMIN_LOGIN == username)
# pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
# if login_valid and pwd_valid:
try:
user = DrupalUsers.objects.using('drupaldb').get(name=username)
except DrupalUsers.DoesNotExist:
# Create a new user. Note that we can set password
# to anything, because it won't be checked; the password
# from settings.py will.
# user = User(username=username, password='test')
# user.is_staff = False
# user.is_active = False
# user.is_superuser = False
# user.save()
return None
# return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
# class DrupalUserAuthBackend(object):
# """
# Authenticates against ippcdrupal.models.DrupalUsers
# """
#
# def authenticate(self, username=None, password=None, **kwargs):
# # UserModel = get_user_model()
# # if username is None:
# # username = kwargs.get(UserModel.USERNAME_FIELD)
# try:
# user = DrupalUsers.objects.using('drupaldb').get(name=username) # name in ippcdrupal.models.DrupalUsers
# # if check_password(password):
# if check_password(password):
# return user
# except DrupalUsers.DoesNotExist:
# return None
# class SettingsBackend(object):
# """
# Authenticate against the settings ADMIN_LOGIN and ADMIN_PASSWORD.
#
# Use the login name, and a hash of the password. For example:
#
# ADMIN_LOGIN = 'admin'
# ADMIN_PASSWORD = 'sha1$4e987$afbcf42e21bd417fb71db8c66b321e9fc33051de'
# """
#
# def DrupalUserAuth(self, username=None, password=None, db=None, **kwargs):
# login_valid = (settings.ADMIN_LOGIN == username)
# pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
# if login_valid and pwd_valid:
# try:
# user = User.objects.using(db).get(username=name)
# if user.check_password(password):
# return user
# # user = User.objects.get(username=username)
# # except User.DoesNotExist:
# # # Create a new user. Note that we can set password
# # # to anything, because it won't be checked; the password
# # # from settings.py will.
# # user = User(username=username, password='get from settings.py')
# # user.is_staff = True
# # user.is_superuser = True
# # user.save()
# return user
# return None
#
# def get_user(self, user_id):
# try:
# return User.objects.get(pk=user_id)
# except User.DoesNotExist:
# return None
#
# from __future__ import unicode_literals
# from django.contrib.auth import get_user_model
# from django.contrib.auth.models import Permission
#
# class DrupalUserAuth(object):
# """
# Authenticates against django.contrib.auth.models.User.
# """
#
# def authenticate(self, username=None, password=None, db=None, **kwargs):
# UserModel = get_user_model()
# if username is None:
# username = kwargs.get(UserModel.USERNAME_FIELD)
# try:
# user = UserModel.objects.using(db).get(username=username)
# if user.check_password(password):
# return user
# except UserModel.DoesNotExist:
# return None
# from __future__ import unicode_literals
# from django.contrib.auth import get_user_model
# from django.contrib.auth.models import Permission
#
# class DrupalUserAuth(object):
# """
# Authenticates against django.contrib.auth.models.User.
# """
#
# def authenticate(self, username=None, password=None, db=None, **kwargs):
# UserModel = get_user_model()
# if username is None:
# username = kwargs.get(UserModel.USERNAME_FIELD)
# try:
# user = UserModel.objects.using(db).get(username=username)
# if user.check_password(password):
# return user
# except UserModel.DoesNotExist:
# return None
#
#
#
| [
"[email protected]"
] | |
5bb40c1749eaa3eac4770ac0d10b52827385b4ad | 19ffa66dc7ad2eb2e7f81783abbf365caf04dae9 | /Code/Mine/Python/SympyHelpers/MethodHelp/GeqPolyDirect/PolynomialWay.py | 94a1ef9bc9e2ebd9af3a024a9d79983f1aa7c711 | [] | no_license | jordanpitt3141/gSGN | 648bd137760937bc7ab28dfcc11ec6b0039b33d9 | c62b88316ba583737500341d233982c3c3810dbd | refs/heads/master | 2023-02-25T23:48:39.919795 | 2021-01-27T04:54:52 | 2021-01-27T04:54:52 | 264,787,792 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,569 | py | """
Python code to generate csv files that contain integrals of the basis functions
needed to generate the matrices of the Finite Element Method.
by Jordan Pitt 11/10/2018
"""
"""
############################# IMPORTS #########################################
"""
from sympy import * #Related website: https://www.sympy.org/en/index.html
from IPython.display import display
# symbols
x,t,dx,dt,ga,b1 = symbols('x,t,dx,dt,g,beta1', positive = True, nonzero = True,real=True)
def PolyFromPoints(yjmh,yjms,yjps,yjph,dx):
a3 = (-9*yjmh + 27*yjms - 27*yjps + 9*yjph)/ (2*dx**3)
a2 = (9*yjmh - 9*yjms - 9*yjps + 9*yjph )/ (4*dx**2)
a1 = (yjmh - 27*yjms + 27*yjps - yjph )/ (8*dx)
a0 = (-yjmh+ 9*yjms + 9*yjps - yjph)/ 16
return a0,a1,a2,a3
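# Added sanity check (not in the original script): assuming the four values are samples
# of the cubic a0 + a1*x + a2*x**2 + a3*x**3 taken at x = -dx/2, -dx/6, +dx/6, +dx/2,
# PolyFromPoints reproduces them exactly.  Call _check_poly_from_points() to verify.
def _check_poly_from_points():
    yjmh, yjms, yjps, yjph, dxs = symbols('yjmh yjms yjps yjph dxs', real=True)
    a0, a1, a2, a3 = PolyFromPoints(yjmh, yjms, yjps, yjph, dxs)
    p = lambda xv: a0 + a1*xv + a2*xv**2 + a3*xv**3
    assert simplify(p(-dxs/2) - yjmh) == 0
    assert simplify(p(-dxs/6) - yjms) == 0
    assert simplify(p( dxs/6) - yjps) == 0
    assert simplify(p( dxs/2) - yjph) == 0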
def SolveForuEdges(beta1,ha0,ha1,ha2,ha3,Ga0,Ga1,Ga2,Ga3 ,dx):
print(beta1,ha0,ha1,ha2,ha3,Ga0,Ga1,Ga2,Ga3 ,dx)
if (abs(beta1) > 10.0**(-10)):
if (abs(ha3) < 10.0**(-15)):
ua3 =(108*Ga0*beta1**2*ha0**4*ha1*ha3**2 - 36*Ga0*beta1**2*ha0**4*ha2**2*ha3 + 576*Ga0*beta1**2*ha0**3*ha1**2*ha2*ha3 - 360*Ga0*beta1**2*ha0**3*ha1*ha2**3 - 216*Ga0*beta1**2*ha0**2*ha1**4*ha3 + 240*Ga0*beta1**2*ha0**2*ha1**3*ha2**2 + 24*Ga0*beta1**2*ha0*ha1**5*ha2 + 24*Ga0*beta1**2*ha1**7 - 21*Ga0*beta1*ha0**2*ha1**2*ha3 + 72*Ga0*beta1*ha0**2*ha1*ha2**2 - 22*Ga0*beta1*ha0*ha1**3*ha2 - 11*Ga0*beta1*ha1**5 + Ga0*ha0**2*ha3 - 2*Ga0*ha0*ha1*ha2 + Ga0*ha1**3 - 198*Ga1*beta1**2*ha0**5*ha3**2 - 426*Ga1*beta1**2*ha0**4*ha1*ha2*ha3 + 192*Ga1*beta1**2*ha0**4*ha2**3 + 240*Ga1*beta1**2*ha0**3*ha1**3*ha3 - 312*Ga1*beta1**2*ha0**3*ha1**2*ha2**2 - 48*Ga1*beta1**2*ha0**2*ha1**4*ha2 - 24*Ga1*beta1**2*ha0*ha1**6 + 10*Ga1*beta1*ha0**3*ha1*ha3 - 28*Ga1*beta1*ha0**3*ha2**2 + 33*Ga1*beta1*ha0**2*ha1**2*ha2 + 11*Ga1*beta1*ha0*ha1**4 + Ga1*ha0**2*ha2 - Ga1*ha0*ha1**2 + 132*Ga2*beta1**2*ha0**5*ha2*ha3 - 144*Ga2*beta1**2*ha0**4*ha1**2*ha3 + 168*Ga2*beta1**2*ha0**4*ha1*ha2**2 + 72*Ga2*beta1**2*ha0**3*ha1**3*ha2 + 24*Ga2*beta1**2*ha0**2*ha1**5 - 22*Ga2*beta1*ha0**4*ha3 - 44*Ga2*beta1*ha0**3*ha1*ha2 - 11*Ga2*beta1*ha0**2*ha1**3 + Ga2*ha0**2*ha1 + 90*Ga3*beta1**2*ha0**5*ha1*ha3 - 96*Ga3*beta1**2*ha0**5*ha2**2 - 6*Ga3*beta1**2*ha0**4*ha1**2*ha2 - 24*Ga3*beta1**2*ha0**3*ha1**4 + 22*Ga3*beta1*ha0**4*ha2 + 11*Ga3*beta1*ha0**3*ha1**2 - Ga3*ha0**3)/(ha0**4*(1188*beta1**3*ha0**4*ha3**2 - 4248*beta1**3*ha0**3*ha1*ha2*ha3 + 2304*beta1**3*ha0**3*ha2**3 - 792*beta1**3*ha0**2*ha1**3*ha3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 + 624*beta1**2*ha0**2*ha1*ha3 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua2 = (54*Ga0*beta1**2*ha0**4*ha3**2 - 522*Ga0*beta1**2*ha0**3*ha1*ha2*ha3 + 144*Ga0*beta1**2*ha0**3*ha2**3 + 180*Ga0*beta1**2*ha0**2*ha1**3*ha3 + 36*Ga0*beta1**2*ha0**2*ha1**2*ha2**2 - 216*Ga0*beta1**2*ha0*ha1**4*ha2 - 108*Ga0*beta1**2*ha1**6 + 30*Ga0*beta1*ha0**2*ha1*ha3 - 36*Ga0*beta1*ha0**2*ha2**2 - 15*Ga0*beta1*ha0*ha1**2*ha2 + 39*Ga0*beta1*ha1**4 + Ga0*ha0*ha2 - Ga0*ha1**2 + 324*Ga1*beta1**2*ha0**4*ha2*ha3 - 243*Ga1*beta1**2*ha0**3*ha1**2*ha3 + 216*Ga1*beta1**2*ha0**3*ha1*ha2**2 + 324*Ga1*beta1**2*ha0**2*ha1**3*ha2 + 108*Ga1*beta1**2*ha0*ha1**5 - 9*Ga1*beta1*ha0**3*ha3 - 24*Ga1*beta1*ha0**2*ha1*ha2 - 39*Ga1*beta1*ha0*ha1**3 + Ga1*ha0*ha1 + 126*Ga2*beta1**2*ha0**4*ha1*ha3 - 144*Ga2*beta1**2*ha0**4*ha2**2 - 252*Ga2*beta1**2*ha0**3*ha1**2*ha2 - 108*Ga2*beta1**2*ha0**2*ha1**4 + 36*Ga2*beta1*ha0**3*ha2 + 39*Ga2*beta1*ha0**2*ha1**2 - Ga2*ha0**2 - 54*Ga3*beta1**2*ha0**5*ha3 + 72*Ga3*beta1**2*ha0**4*ha1*ha2 + 63*Ga3*beta1**2*ha0**3*ha1**3 - 21*Ga3*beta1*ha0**3*ha1)/(ha0**3*(1188*beta1**3*ha0**4*ha3**2 - 4248*beta1**3*ha0**3*ha1*ha2*ha3 + 2304*beta1**3*ha0**3*ha2**3 - 792*beta1**3*ha0**2*ha1**3*ha3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 + 624*beta1**2*ha0**2*ha1*ha3 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua1 = (36*Ga0*beta1**2*ha0**3*ha2*ha3 - 432*Ga0*beta1**2*ha0**2*ha1**2*ha3 + 504*Ga0*beta1**2*ha0**2*ha1*ha2**2 - 384*Ga0*beta1**2*ha0*ha1**3*ha2 + 432*Ga0*beta1**2*ha1**5 + 6*Ga0*beta1*ha0**2*ha3 - 48*Ga0*beta1*ha0*ha1*ha2 - 27*Ga0*beta1*ha1**3 + Ga0*ha1 + 594*Ga1*beta1**2*ha0**3*ha1*ha3 - 576*Ga1*beta1**2*ha0**3*ha2**2 + 72*Ga1*beta1**2*ha0**2*ha1**2*ha2 - 432*Ga1*beta1**2*ha0*ha1**4 + 52*Ga1*beta1*ha0**2*ha2 + 27*Ga1*beta1*ha0*ha1**2 - Ga1*ha0 - 132*Ga2*beta1**2*ha0**4*ha3 + 72*Ga2*beta1**2*ha0**3*ha1*ha2 + 312*Ga2*beta1**2*ha0**2*ha1**3 - 4*Ga2*beta1*ha0**2*ha1 + 96*Ga3*beta1**2*ha0**4*ha2 - 162*Ga3*beta1**2*ha0**3*ha1**2 - 6*Ga3*beta1*ha0**3)/(ha0**2*(1188*beta1**3*ha0**4*ha3**2 - 4248*beta1**3*ha0**3*ha1*ha2*ha3 + 2304*beta1**3*ha0**3*ha2**3 - 792*beta1**3*ha0**2*ha1**3*ha3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 + 624*beta1**2*ha0**2*ha1*ha3 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua0 = (1296*Ga0*beta1**3*ha0**4*ha3**2 - 5184*Ga0*beta1**3*ha0**3*ha1*ha2*ha3 + 2592*Ga0*beta1**3*ha0**3*ha2**3 - 1728*Ga0*beta1**3*ha0**2*ha1**3*ha3 + 2592*Ga0*beta1**3*ha0**2*ha1**2*ha2**2 - 2160*Ga0*beta1**3*ha0*ha1**4*ha2 + 1440*Ga0*beta1**3*ha1**6 + 702*Ga0*beta1**2*ha0**2*ha1*ha3 - 792*Ga0*beta1**2*ha0**2*ha2**2 - 378*Ga0*beta1**2*ha0*ha1**2*ha2 - 192*Ga0*beta1**2*ha1**4 + 54*Ga0*beta1*ha0*ha2 + 27*Ga0*beta1*ha1**2 - Ga0 + 648*Ga1*beta1**3*ha0**4*ha2*ha3 + 1296*Ga1*beta1**3*ha0**3*ha1**2*ha3 - 1296*Ga1*beta1**3*ha0**3*ha1*ha2**2 + 864*Ga1*beta1**3*ha0**2*ha1**3*ha2 - 1080*Ga1*beta1**3*ha0*ha1**5 - 18*Ga1*beta1**2*ha0**3*ha3 + 108*Ga1*beta1**2*ha0**2*ha1*ha2 + 3*Ga1*beta1**2*ha0*ha1**3 - Ga1*beta1*ha0*ha1 - 144*Ga2*beta1**3*ha0**4*ha1*ha3 - 288*Ga2*beta1**3*ha0**4*ha2**2 - 288*Ga2*beta1**3*ha0**3*ha1**2*ha2 + 720*Ga2*beta1**3*ha0**2*ha1**4 + 72*Ga2*beta1**2*ha0**3*ha2 + 66*Ga2*beta1**2*ha0**2*ha1**2 - 2*Ga2*beta1*ha0**2 - 108*Ga3*beta1**3*ha0**5*ha3 + 432*Ga3*beta1**3*ha0**4*ha1*ha2 - 360*Ga3*beta1**3*ha0**3*ha1**3 - 60*Ga3*beta1**2*ha0**3*ha1)/(ha0*(1188*beta1**3*ha0**4*ha3**2 - 4248*beta1**3*ha0**3*ha1*ha2*ha3 + 2304*beta1**3*ha0**3*ha2**3 - 792*beta1**3*ha0**2*ha1**3*ha3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 + 624*beta1**2*ha0**2*ha1*ha3 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
elif (abs(ha2) < 10.0**(-15)):
ua3 = (-360*Ga0*beta1**2*ha0**3*ha1*ha2**3 + 240*Ga0*beta1**2*ha0**2*ha1**3*ha2**2 + 24*Ga0*beta1**2*ha0*ha1**5*ha2 + 24*Ga0*beta1**2*ha1**7 + 72*Ga0*beta1*ha0**2*ha1*ha2**2 - 22*Ga0*beta1*ha0*ha1**3*ha2 - 11*Ga0*beta1*ha1**5 - 2*Ga0*ha0*ha1*ha2 + Ga0*ha1**3 + 192*Ga1*beta1**2*ha0**4*ha2**3 - 312*Ga1*beta1**2*ha0**3*ha1**2*ha2**2 - 48*Ga1*beta1**2*ha0**2*ha1**4*ha2 - 24*Ga1*beta1**2*ha0*ha1**6 - 28*Ga1*beta1*ha0**3*ha2**2 + 33*Ga1*beta1*ha0**2*ha1**2*ha2 + 11*Ga1*beta1*ha0*ha1**4 + Ga1*ha0**2*ha2 - Ga1*ha0*ha1**2 + 168*Ga2*beta1**2*ha0**4*ha1*ha2**2 + 72*Ga2*beta1**2*ha0**3*ha1**3*ha2 + 24*Ga2*beta1**2*ha0**2*ha1**5 - 44*Ga2*beta1*ha0**3*ha1*ha2 - 11*Ga2*beta1*ha0**2*ha1**3 + Ga2*ha0**2*ha1 - 96*Ga3*beta1**2*ha0**5*ha2**2 - 6*Ga3*beta1**2*ha0**4*ha1**2*ha2 - 24*Ga3*beta1**2*ha0**3*ha1**4 + 22*Ga3*beta1*ha0**4*ha2 + 11*Ga3*beta1*ha0**3*ha1**2 - Ga3*ha0**3)/(ha0**4*(2304*beta1**3*ha0**3*ha2**3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua2 = (144*Ga0*beta1**2*ha0**3*ha2**3 + 36*Ga0*beta1**2*ha0**2*ha1**2*ha2**2 - 216*Ga0*beta1**2*ha0*ha1**4*ha2 - 108*Ga0*beta1**2*ha1**6 - 36*Ga0*beta1*ha0**2*ha2**2 - 15*Ga0*beta1*ha0*ha1**2*ha2 + 39*Ga0*beta1*ha1**4 + Ga0*ha0*ha2 - Ga0*ha1**2 + 216*Ga1*beta1**2*ha0**3*ha1*ha2**2 + 324*Ga1*beta1**2*ha0**2*ha1**3*ha2 + 108*Ga1*beta1**2*ha0*ha1**5 - 24*Ga1*beta1*ha0**2*ha1*ha2 - 39*Ga1*beta1*ha0*ha1**3 + Ga1*ha0*ha1 - 144*Ga2*beta1**2*ha0**4*ha2**2 - 252*Ga2*beta1**2*ha0**3*ha1**2*ha2 - 108*Ga2*beta1**2*ha0**2*ha1**4 + 36*Ga2*beta1*ha0**3*ha2 + 39*Ga2*beta1*ha0**2*ha1**2 - Ga2*ha0**2 + 72*Ga3*beta1**2*ha0**4*ha1*ha2 + 63*Ga3*beta1**2*ha0**3*ha1**3 - 21*Ga3*beta1*ha0**3*ha1)/(ha0**3*(2304*beta1**3*ha0**3*ha2**3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua1 = (504*Ga0*beta1**2*ha0**2*ha1*ha2**2 - 384*Ga0*beta1**2*ha0*ha1**3*ha2 + 432*Ga0*beta1**2*ha1**5 - 48*Ga0*beta1*ha0*ha1*ha2 - 27*Ga0*beta1*ha1**3 + Ga0*ha1 - 576*Ga1*beta1**2*ha0**3*ha2**2 + 72*Ga1*beta1**2*ha0**2*ha1**2*ha2 - 432*Ga1*beta1**2*ha0*ha1**4 + 52*Ga1*beta1*ha0**2*ha2 + 27*Ga1*beta1*ha0*ha1**2 - Ga1*ha0 + 72*Ga2*beta1**2*ha0**3*ha1*ha2 + 312*Ga2*beta1**2*ha0**2*ha1**3 - 4*Ga2*beta1*ha0**2*ha1 + 96*Ga3*beta1**2*ha0**4*ha2 - 162*Ga3*beta1**2*ha0**3*ha1**2 - 6*Ga3*beta1*ha0**3)/(ha0**2*(2304*beta1**3*ha0**3*ha2**3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
ua0 =(2592*Ga0*beta1**3*ha0**3*ha2**3 + 2592*Ga0*beta1**3*ha0**2*ha1**2*ha2**2 - 2160*Ga0*beta1**3*ha0*ha1**4*ha2 + 1440*Ga0*beta1**3*ha1**6 - 792*Ga0*beta1**2*ha0**2*ha2**2 - 378*Ga0*beta1**2*ha0*ha1**2*ha2 - 192*Ga0*beta1**2*ha1**4 + 54*Ga0*beta1*ha0*ha2 + 27*Ga0*beta1*ha1**2 - Ga0 - 1296*Ga1*beta1**3*ha0**3*ha1*ha2**2 + 864*Ga1*beta1**3*ha0**2*ha1**3*ha2 - 1080*Ga1*beta1**3*ha0*ha1**5 + 108*Ga1*beta1**2*ha0**2*ha1*ha2 + 3*Ga1*beta1**2*ha0*ha1**3 - Ga1*beta1*ha0*ha1 - 288*Ga2*beta1**3*ha0**4*ha2**2 - 288*Ga2*beta1**3*ha0**3*ha1**2*ha2 + 720*Ga2*beta1**3*ha0**2*ha1**4 + 72*Ga2*beta1**2*ha0**3*ha2 + 66*Ga2*beta1**2*ha0**2*ha1**2 - 2*Ga2*beta1*ha0**2 + 432*Ga3*beta1**3*ha0**4*ha1*ha2 - 360*Ga3*beta1**3*ha0**3*ha1**3 - 60*Ga3*beta1**2*ha0**3*ha1)/(ha0*(2304*beta1**3*ha0**3*ha2**3 + 1008*beta1**3*ha0**2*ha1**2*ha2**2 - 576*beta1**3*ha0*ha1**4*ha2 + 360*beta1**3*ha1**6 - 720*beta1**2*ha0**2*ha2**2 - 204*beta1**2*ha0*ha1**2*ha2 - 189*beta1**2*ha1**4 + 52*beta1*ha0*ha2 + 26*beta1*ha1**2 - 1))
elif (abs(ha1) < 10.0**(-15)):
ua3 = (Ga0*ha1**3 - Ga1*ha0*ha1**2 + Ga2*ha0**2*ha1 - Ga3*ha0**3)/(ha0**4*(15*beta1*ha1**2 - 1))
ua2 = (-36*Ga0*beta1*ha1**4 + Ga0*ha1**2 + 36*Ga1*beta1*ha0*ha1**3 - Ga1*ha0*ha1 - 36*Ga2*beta1*ha0**2*ha1**2 + Ga2*ha0**2 + 21*Ga3*beta1*ha0**3*ha1)/(ha0**3*(120*beta1**2*ha1**4 - 23*beta1*ha1**2 + 1))
ua1 = (432*Ga0*beta1**2*ha1**5 - 27*Ga0*beta1*ha1**3 + Ga0*ha1 - 432*Ga1*beta1**2*ha0*ha1**4 + 27*Ga1*beta1*ha0*ha1**2 - Ga1*ha0 + 312*Ga2*beta1**2*ha0**2*ha1**3 - 4*Ga2*beta1*ha0**2*ha1 - 162*Ga3*beta1**2*ha0**3*ha1**2 - 6*Ga3*beta1*ha0**3)/(ha0**2*(360*beta1**3*ha1**6 - 189*beta1**2*ha1**4 + 26*beta1*ha1**2 - 1))
ua0 = (1440*Ga0*beta1**3*ha1**6 - 192*Ga0*beta1**2*ha1**4 + 27*Ga0*beta1*ha1**2 - Ga0 - 1080*Ga1*beta1**3*ha0*ha1**5 + 3*Ga1*beta1**2*ha0*ha1**3 - Ga1*beta1*ha0*ha1 + 720*Ga2*beta1**3*ha0**2*ha1**4 + 66*Ga2*beta1**2*ha0**2*ha1**2 - 2*Ga2*beta1*ha0**2 - 360*Ga3*beta1**3*ha0**3*ha1**3 - 60*Ga3*beta1**2*ha0**3*ha1)/(ha0*(360*beta1**3*ha1**6 - 189*beta1**2*ha1**4 + 26*beta1*ha1**2 - 1))
else:
ua3 =Ga3/ha0
ua2 =Ga2/ha0
ua1 =Ga1/ha0 + 6*Ga3*beta1*ha0
ua0 =Ga0/ha0 + 2*Ga2*beta1*ha0
else:
ua3 =Ga3/ha3
ua2 =Ga2/ha2
ua1 =Ga1/ha1
ua0 =Ga0/ha0
    return ua0,ua1,ua2,ua3
dx = 0.1
h0 = 2
G0 = 1
print('A')
A = SolveForuEdges(2.0/3.0,2,2,2,2,1,1,1,1,dx)
print(A)
print('A')
A = SolveForuEdges(2.0/3.0,2,2,2,0,1,1,1,1,dx)
print(A)
print('A')
A = SolveForuEdges(2.0/3.0,2,2,0,0,1,1,1,1,dx)
print(A)
print('A')
A = SolveForuEdges(2.0/3.0,2,0,0,0,1,1,1,1,dx)
print(A)
# print('A')
# A = SolveForuEdges(2.0/3.0,1,2,3,4,0,0,0,0,dx)
# print('B')
# B = SolveForuEdges(2.0/3.0,1,2,3,4,1,0,0,0,dx)
# print('C')
# C = SolveForuEdges(2.0/3.0,1,2,3,4,1,1,0,0,dx)
# print('D')
# D = SolveForuEdges(2.0/3.0,1,2,3,4,1,1,1,0,dx)
# print('E')
# E = SolveForuEdges(2.0/3.0,1,2,3,4,1,1,1,1,dx)
# jp: Let's check
"[email protected]"
] | |
b2e0397ffe57b93e5e6ae261bde6a10fee12cd3a | b213c8b10b831d5fdacfb65c145450f6af846a4f | /blog/blog.py | ce23082f7c014309cc37d87c9b6217fc56981450 | [] | no_license | tuomas56/random-python-stuff | 1df260532abeb0a3da02560ed23ad1ee1995f5b2 | 12737127a31f1a3b84456021e8a5ac81545324da | refs/heads/master | 2020-12-31T04:42:12.123345 | 2015-11-25T21:54:28 | 2015-11-25T21:54:28 | 46,889,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | from bottle import server_names, ServerAdapter, run, request, Bottle, redirect,response, abort
import markdown
import re
import os
import pickle
import uuid
import scrypt
import base64
from datetime import datetime, timedelta
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
from config import SSL_PRIV_KEY, PASS_DB, SALT_DB, HASH_TIME
SCRIPT_RE = re.compile(r"<script>(.*?)</script>")
# HASH_TIME is imported from config as an "H:M:S" string (assumption based on the
# original strptime format); convert it to a timedelta here.
_hash_time = datetime.strptime(HASH_TIME, "%H:%M:%S")
HASH_TIME = timedelta(hours=_hash_time.hour, minutes=_hash_time.minute, seconds=_hash_time.second)
InvalidUserPass = RuntimeError("Invalid username or password.")
class SSLCherryPy(ServerAdapter):
def run(self, handler):
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
server.ssl_adapter = BuiltinSSLAdapter(SSL_PRIV_KEY, SSL_PRIV_KEY)
try:
server.start()
finally:
server.stop()
server_names['sslcherrypy'] = SSLCherryPy
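# Registering the adapter under 'sslcherrypy' lets bottle's run() select it by name.
# A hypothetical launch (host and port are assumptions, not taken from this file) would be:
#
#     run(app, host='0.0.0.0', port=8443, server='sslcherrypy')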
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
return fn(*args, **kwargs)
return _enable_cors
app = Bottle()
current_hashes = {}
with open(PASS_DB, "rb") as f:
pass_db = pickle.load(f)
with open(SALT_DB, "rb") as f:
salt_db = pickle.load(f)
class HashData:
def __init__(self, hash, expiry, user):
self.hash = hash
self.expiry = expiry
self.user = user
def expired(self):
return self.expiry < datetime.now()
def authenticated(fn):
def _authenticated(hash, *args, **kwargs):
if hash in current_hashes:
if not current_hashes[hash].expired():
return fn(current_hashes[hash], *args, **kwargs)
else:
del current_hashes[hash]
redirect('/login/expired')
else:
redirect('/login/expired')
return _authenticated
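# Hypothetical protected route showing how the decorator above is meant to be used
# (the route path matches the redirect in do_login below; the template name is an
# assumption, not part of the original file):
#
# @app.route("/home/<hash>")
# @authenticated
# def home(hash_data):
#     return template('pages/home.html.tpl', user=hash_data.user)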
def action_login(user, passwd):
if user not in pass_db or pass_db[user] != passwd_hash(user, passwd):
raise InvalidUserPass
else:
return generate_hash(user)
def generate_hash(user):
expiry = datetime.now() + HASH_TIME
hash = uuid.uuid4()
	return HashData(str(hash), expiry, user)
def generate_salt():
return base64.b64encode(os.urandom(16)).decode()
def passwd_hash(user, passwd):
return salt_db[user] + scrypt.hash(passwd, salt_db[user], mintime=0.1)
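# Sketch added for clarity (not in the original file): how a new user record could be
# created so that action_login() above can verify it later.  Writing the updated
# dictionaries back to PASS_DB / SALT_DB is assumed to happen elsewhere.
def register_user(user, passwd):
	salt_db[user] = generate_salt()
	pass_db[user] = passwd_hash(user, passwd)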
@app.route("/do/login/<user>/<passwd>")
@enable_cors
def do_login(user, passwd):
try:
		hash_data = action_login(user, passwd)
		current_hashes[hash_data.hash] = hash_data
		redirect('/home/%s' % hash_data.hash)
except RuntimeError:
redirect('/login/invalid')
@app.route("/login/<error>")
def login(error):
return template('pages/login.html.tpl', error=login_error(error))
def login_error(error):
	if error == 'invalid':
		return 'Invalid username or password.'
	elif error == 'expired':
		return 'Hash has expired; please login.'
	elif error == 'none':
return ''
else:
raise RuntimeError("No such login error.")
class Article:
def __init__(self, author, date_written, tags, text):
self.author = author
self.date_written = date_written
self.tags = tags
self.text = text
class Comment:
def __init__(self, author, date_posted, parent, article, text):
self.author = author
self.date_posted = date_posted
self.parent = parent
self.article = article
self.text = text
def process_article(text):
lines = text.split("\n")
author, date_written, tags, *lines = lines
date_written = datetime.strptime(date_written, "%d/%m/%Y %H:%M")
tags = tags.split(",")
text = markdown.markdown('\n'.join(lines))
return Article(author, date_written, tags, text)
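# process_article expects the raw text to start with three header lines -- author,
# date written ("%d/%m/%Y %H:%M") and a comma-separated tag list -- followed by the
# markdown body.  A hypothetical example input (values are illustrative only):
#
#     Jane Doe
#     14/02/2015 09:30
#     python,bottle,markdown
#     # My first post
#     Hello *world*.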
def process_comment(author, date_posted, parent, article, text):
	return Comment(author, datetime.strptime(date_posted, "%d/%m/%Y %H:%M"), parent, article, SCRIPT_RE.sub(r"<code>\1</code>", markdown.markdown(text)))
"[email protected]"
] | |
b191a119c6debbe2643f12b03216b61002e09590 | 8f4c59e69cce2f6e932f55b3c65aae376b206a2c | /笨办法学python/ex47/skeleton/tests/ex47_tests.py | 00d322ae6ea3a7f953674e7ad506bc4a1713fde2 | [] | no_license | zmjm4/python | ef7206292f1c3a3a5763b25527024999de5e8e79 | 44cf74c0f16891c351ce214762218ccf2d7353a0 | refs/heads/master | 2020-05-27T17:23:48.776167 | 2018-05-24T07:14:16 | 2018-05-24T07:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | # -*- coding: utf-8 -*-
from nose.tools import *
from ex47.game import Room
def test_room():
gold=Room("GoldRoom",
"""This room has gold in it you can grab. There's a
door to the north.""")
assert_equal(gold.name,"GoldRoom")
assert_equal(gold.paths,{})
def test_room_paths():
center = Room("Center", "Test room in the center.")
north = Room("North", "Test room in the north.")
south = Room("South", "Test room in the south.")
center.add_paths({'north':north,'south':south})
assert_equal(center.go('north'),north)
assert_equal(center.go('south'),south)
def test_map():
start = Room("Start", "You can go west and down a hole.")
west = Room("Trees", "There are trees here, you can go east.")
down = Room("Dungeon", "It's dark down here, you can go up.")
start.add_paths({'west':west,'down':down})
west.add_paths({'east':start})
down.add_paths({'up':start})
assert_equal(start.go('west'),west)
assert_equal(start.go('west').go('east'),start)
assert_equal(start.go('down').go('up'),start)
| [
"[email protected]"
] | |
58c46c9a110a1eb99789632d26ae3ae38b04e23d | 9463d85666453fd8e57a0ce9e515e4765ae2b60a | /cwetsy/cwetsy/parser/browse_parser.py | a049cb5b8bc4542890ee7856ce7379b97e183bed | [
"MIT"
] | permissive | trujunzhang/djzhang-targets | dc6c3086553a5450fb239cc1cef5330a51a02e1f | c2e327acde9d51f0455e7243f17d93d74b579501 | refs/heads/master | 2021-01-09T20:52:31.258826 | 2016-07-16T13:18:53 | 2016-07-16T13:18:53 | 60,747,429 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from cwetsy.parser.base_parser import BaseParser
class BrowseParser(BaseParser):
def __init__(self):
super(BrowseParser, self).__init__()
def parse(self, url, hxs):
return None
| [
"[email protected]"
] | |
68e4256f5b371f2525935ebc77355c859a1a2757 | 2993adb383fed317e6a83f2b8c2cacd640d19fb3 | /bookmarks/account/authentication.py | 2a9db5db7359fcc4a5a5ddcca0b1e3170ebbf911 | [] | no_license | Dyavathrocky/socialapp | 0e811a957a224b30aa32e8a24e3253c1b49a25df | 1dc071b69f9258c4f540211e25635ac277a6f6e4 | refs/heads/master | 2022-12-02T03:42:32.778466 | 2020-08-21T13:19:25 | 2020-08-21T13:19:25 | 286,060,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from django.contrib.auth.models import User
class EmailAuthBackend(object):
"""
Authenticate using an e-mail address.
"""
def authenticate(self, request, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None | [
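# To enable this backend (sketch; the dotted path is assumed from this app's layout,
# bookmarks/account/authentication.py), add it to AUTHENTICATION_BACKENDS in settings.py
# next to Django's default ModelBackend:
#
#     AUTHENTICATION_BACKENDS = [
#         'django.contrib.auth.backends.ModelBackend',
#         'account.authentication.EmailAuthBackend',
#     ]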
"[email protected]"
] | |
9478688498c1c1a485af4ce8894c0f2948b2b74b | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/25/0083d7fa3b6a00141afa8a8ed49a3dc2 | 7b564846565a3335ffc0ed085fe8f0d38b42e923 | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 5,466 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
#from operator import add
#import copy
#from gnuradio import gr
import gras
class expo(gras.Block):
"""
docstring for block expo
"""
def __init__(self):
gras.Block.__init__(self,
name="expo",
in_sig=[numpy.float32],
out_sig=[numpy.float32])
def set_parameters(self,g,a,b):
self.gama=g
self.alpha=a
self.beta=b
def yield_times(self):
from datetime import date, time, datetime, timedelta
start = datetime.combine(date.today(), time(0, 0))
yield start.strftime("%S")
while True:
start += timedelta(seconds=0.5)
yield start.strftime("%S")
def work(self, input_items, output_items):
in0 = input_items[0]
out = output_items[0]
tmrg = []
o1 = []
o2 = []
o3 = []
ans = []
final_output = []
gen = self.yield_times()
for ii in range(20):
tmrg.append(gen.next())
# print "tmrg :",tmrg
"""for i1 in range(0,10):
o1.append((self.gama)/(self.alpha*self.beta))
print "o1 : ", o1
for i2 in range(0,10):
o2.append(((self.gama)*(-numpy.exp(self.alpha)))/(self.alpha*(self.beta-self.alpha)))
print "o2 : ",o2
for i3 in range(0,10):
o3.append(((self.gama)*(-numpy.exp(self.beta)))/(self.beta*(self.alpha-self.beta)))
print "o3 : ",o3
#ans.append(o1+o2+o3)
for i in range(0,10):
ans.append(list(numpy.array(o1[i])+numpy.array(o2[i])+numpy.array(o3[i])))
print "Final Ans : ",ans
print "Type out : ",type(out)
print "Type ans :",type(ans)
out = copy.copy(ans)
#out[0:1] = ans
print "Output is : " ,out
self.consume(0,1)
self.produce(0,1)"""
#o1.append((self.gama)/(self.alpha*self.beta))
#print "o1 : ", o1
for i in range(0,20):
o1.append((self.gama)/(self.alpha*self.beta))
print "o1 : ", o1[i]
o2.append(((self.gama)*(numpy.exp(-(self.alpha*in0[0]*i)))/(self.alpha*(self.beta-self.alpha))))
print "o2 : ",o2[i]
o3.append(((self.gama)*(numpy.exp(-(self.beta*in0[0]*i)))/(self.beta*(self.alpha-self.beta))))
print "o3 : ",o3[i]
ans.append(o1[i]+o2[i]+o3[i])
print "Final Ans : ",ans
#print "Type out : ",type(out)
#print "Type ans :",type(ans)
#out[0:1] = ans
#print "Output : ", out[0]
"""for i in range(0,len(ans)):
#out = copy.copy(ans[i])
#out[0:1] = ans
#print "Output is : " ,out"""
"""for i1 in range(0,len(ans)):
final_output.append(o1+ans[i1])
print "Final OutPut : ", final_output"""
for i1 in range(0,len(ans)):
out[0] = ans[i1]
print "Output Sent : ", out
#out[:len(final_output)] = copy.copy(final_output)
self.consume(0,1)
self.produce(0,1)
"""result = []
for i in range(0,20):
result.append(numpy.exp(i))
print "Result : ",result
out[0] = result
self.consume(0,1)
self.produce(0,1) """
#o2 = -numpy.exp(-2*in0[0:1])
#o3 = -numpy.exp(-3*in0[0:1])
#o2=numpy.exp(-(in0[0:1]*self.alpha))
#print("o2 :",o2)
#o3=numpy.sin((self.freq*in0[0:1])+(self.sigma))
#print("o3 :",o3)
#o4=numpy.sqrt(o1-numpy.square(self.zita))
#print("o4 :",o4)
"""ans = o1-(mul/o4)
#ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
print("Final Value : ",ans)
out[0:1] = ans"""
#o2 = -numpy.exp(-2*tmrg)
#o3 = -numpy.exp(-3*in0[0:1])
#o2 = numpy.exp(-in0[0:1]*self.alpha)
#o3 = numpy.exp(-in0[0:1]*self.beta)
#o4 = numpy.sqrt(1-numpy.square(self.alpha))
#ans = 1-((o2*o3)/o4)
#ans.append(o2)
#ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
#print("Final Value : ",ans)
#out[0:1] = ans
#out = copy.copy(ans)
#self.consume(0,1)
#self.produce(0,1)
#return len(output_items[0])
| [
"[email protected]"
] | ||
d3e241d4b04a38c79e01d0b0348b62f60c6c72fa | b44ba1ca68154a37936ae3822ca016b5d9a99a2a | /Redis/redis_pipe.py | bfde6e48477131440764d56064521f1f1f917c54 | [] | no_license | liuxingrichu/advanced-network-program | 6e17d30980e21b3397ac5ed5e404a282983a6869 | 3f84c4600a35af12a68a4c512afbe60ddf6347b1 | refs/heads/master | 2021-01-23T02:05:45.933255 | 2017-08-06T09:15:54 | 2017-08-06T09:15:54 | 85,964,385 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import redis
import time
'''
Use a pipeline to send multiple commands to Redis in a single request.
'''
# db may be chosen from the range 0-15
pool = redis.ConnectionPool(host='localhost', port=6379, db=12)
r = redis.Redis(connection_pool=pool)
# pipe = r.pipeline(transaction=False)
pipe = r.pipeline(transaction=True)
pipe.set('name', 'Tom')
time.sleep(30)
pipe.set('role', 'teacher')
pipe.execute()
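# Note (added for clarity): the two set() calls above are buffered on the client and
# only sent to the server when execute() runs, so the sleep(30) happens before anything
# reaches Redis; transaction=True additionally wraps the buffered commands in MULTI/EXEC
# so they are applied atomically.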
| [
"[email protected]"
] | |
4c3c98dc139b2f8f584f48a9f1db91fb63471c18 | 5eea120356afc15cc3edb71f8864d6771ad865c6 | /futures/var_model/__init__.py | e9af4c55745c0234467df114b12290c6b8f19f73 | [
"MIT"
] | permissive | ShubraChowdhury/Investment_Finance | 469d5e5a200616eee830be18cb4a86d54319a30b | 3da761d755278d3d2de8c201b56d4ff9cb23def4 | refs/heads/master | 2022-12-12T11:52:33.585329 | 2021-09-23T18:13:15 | 2021-09-23T18:13:15 | 153,317,318 | 2 | 0 | null | 2022-12-08T00:45:34 | 2018-10-16T16:22:56 | Jupyter Notebook | UTF-8 | Python | false | false | 312 | py | """
The __init__.py files are required to make Python treat the directories as
containing packages; this is done to prevent directories with a common name,
such as string, from unintentionally hiding valid modules that occur later
(deeper) on the module search path.
@author: ucaiado
Created on 09/05/2016
"""
| [
"[email protected]"
] | |
91f6f93546e8240aff32445f1e68c11ccfe19d83 | 4d2238210813c1581bf44f64d8a63196f75d2df4 | /tem.py | ece18216c221c476bc14897a6b8a415a8a9197d1 | [] | no_license | wwtang/code02 | b1600d34907404c81fa523cfdaa74db0021b8bb3 | 9f03dda7b339d8c310c8a735fc4f6d795b153801 | refs/heads/master | 2020-12-24T14:10:33.738734 | 2012-12-14T04:24:47 | 2012-12-14T04:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | color = raw_input('please select the color: ')
if color == "white" or color == "black":
print "the color was black or white"
elif color > "k" :
print "the color start with letter after the 'K' in alphabet"
| [
"[email protected]"
] | |
a4ce7faf8a9617e3a7dcffa89948c091bf32dc3f | 1e11d6f9245c55e21edfb24f4340d52e3f7f327f | /dillo/migrations/0078_organizations.py | ecdc1eed4f69e90c13232e53b67ef2f646fc6389 | [] | no_license | armadillica/dillo | 996e8462f4f76349ecc49ecb08cdd6c8c66e072b | 960aed85f8438109bed9883321891305e1db8b10 | refs/heads/main | 2023-08-04T06:45:34.570071 | 2023-06-04T00:07:57 | 2023-06-04T00:07:57 | 30,461,275 | 79 | 18 | null | 2023-08-02T00:22:40 | 2015-02-07T16:17:43 | Python | UTF-8 | Python | false | false | 4,925 | py | # Generated by Django 3.2.13 on 2022-11-19 22:29
import dillo.models.mixins
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
def forwards_func(apps, schema_editor):
"""Set default cateogries."""
OrganizationCategory = apps.get_model('dillo', 'OrganizationCategory')
db_alias = schema_editor.connection.alias
for c in {'3D', '2D', 'Features', 'Shorts', 'Games'}:
OrganizationCategory.objects.using(db_alias).create(name=c)
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dillo', '0077_profile_job'),
]
operations = [
migrations.CreateModel(
name='OrganizationCategory',
fields=[
(
'id',
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
('name', models.CharField(max_length=128, unique=True)),
],
options={
'verbose_name_plural': 'Organization categories',
},
),
migrations.CreateModel(
name='Organization',
fields=[
(
'id',
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
(
'created_at',
models.DateTimeField(auto_now_add=True, verbose_name='date created'),
),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date edited')),
('name', models.CharField(max_length=255, unique=True)),
(
'visibility',
models.CharField(
choices=[
('public', 'Public'),
('unlisted', 'Unlisted'),
('under_review', 'Under Review'),
],
default='under_review',
max_length=16,
),
),
(
'description',
models.TextField(
blank=True,
help_text='A description of the organization activities.',
null=True,
),
),
('website', models.URLField(max_length=120)),
(
'logo',
models.ImageField(
blank=True,
height_field='logo_height',
upload_to=dillo.models.mixins.get_upload_to_hashed_path,
width_field='logo_width',
help_text='A square picture, around 512x512.',
),
),
('logo_height', models.PositiveIntegerField(null=True)),
('logo_width', models.PositiveIntegerField(null=True)),
('city', models.CharField(blank=True, max_length=256, null=True)),
(
'country',
django_countries.fields.CountryField(blank=True, max_length=2, null=True),
),
(
'is_online',
models.BooleanField(
default=False, help_text='Operates fully online, with no physical HQ.'
),
),
('is_active', models.BooleanField(default=True)),
(
'categories',
models.ManyToManyField(
help_text='Keywords to identify this organization.',
null=True,
to='dillo.OrganizationCategory',
),
),
(
'city_ref',
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='organizations',
to='dillo.city',
),
),
(
'user',
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
'abstract': False,
},
),
migrations.RunPython(forwards_func, migrations.RunPython.noop),
]
| [
"[email protected]"
] | |
95482e1fc560e2c251c59b36d951f928ba1157ba | 06292f96cba132ca57777672a447cfff7c5abee6 | /week5/tut/submit/1.py | b099a6a693b68bb37a739181ac6b9f40fa36844d | [] | no_license | kietteik/ppl | 1746440b12affe71e67d6f958922b32b1fdaab5c | 2ee60582e81595b8d8b5d0f8212d20151cfe9264 | refs/heads/master | 2023-03-01T00:24:36.969189 | 2021-01-31T05:15:13 | 2021-01-31T05:15:13 | 305,802,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | def double(lst):
'''1. a'''
return [i * 2 for i in lst]
def double(lst):
'''1. b'''
if not lst: return []
return [lst[0] * 2] + double(lst[1:])
def double(lst):
'''1. c'''
return list(map(lambda x: x * 2, lst)) | [
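# Quick usage check (added; not part of the original exercise).  All three variants
# return the same result, though only the last definition of double() survives here:
#
#     double([1, 2, 3])  ->  [2, 4, 6]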
"[email protected]"
] | |
bf0d24f0abd3437a1d7ea9f45e109f8389451f71 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/proto/resources/ad_group_pb2.py | 9b8b6e30e51cd9adcbc7d0531551288d3aeb74cc | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 27,203 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/resources/ad_group.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v4.proto.common import custom_parameter_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_custom__parameter__pb2
from google.ads.google_ads.v4.proto.common import explorer_auto_optimizer_setting_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_explorer__auto__optimizer__setting__pb2
from google.ads.google_ads.v4.proto.common import targeting_setting_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_targeting__setting__pb2
from google.ads.google_ads.v4.proto.enums import ad_group_ad_rotation_mode_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__ad__rotation__mode__pb2
from google.ads.google_ads.v4.proto.enums import ad_group_status_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__status__pb2
from google.ads.google_ads.v4.proto.enums import ad_group_type_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__type__pb2
from google.ads.google_ads.v4.proto.enums import bidding_source_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_bidding__source__pb2
from google.ads.google_ads.v4.proto.enums import targeting_dimension_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_targeting__dimension__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/resources/ad_group.proto',
package='google.ads.googleads.v4.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v4.resourcesB\014AdGroupProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v4/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V4.Resources\312\002!Google\\Ads\\GoogleAds\\V4\\Resources\352\002%Google::Ads::GoogleAds::V4::Resources'),
serialized_pb=_b('\n6google/ads/googleads_v4/proto/resources/ad_group.proto\x12!google.ads.googleads.v4.resources\x1a;google/ads/googleads_v4/proto/common/custom_parameter.proto\x1aJgoogle/ads/googleads_v4/proto/common/explorer_auto_optimizer_setting.proto\x1a<google/ads/googleads_v4/proto/common/targeting_setting.proto\x1a\x43google/ads/googleads_v4/proto/enums/ad_group_ad_rotation_mode.proto\x1a\x39google/ads/googleads_v4/proto/enums/ad_group_status.proto\x1a\x37google/ads/googleads_v4/proto/enums/ad_group_type.proto\x1a\x38google/ads/googleads_v4/proto/enums/bidding_source.proto\x1a=google/ads/googleads_v4/proto/enums/targeting_dimension.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\xe0\x0f\n\x07\x41\x64Group\x12?\n\rresource_name\x18\x01 \x01(\tB(\xe0\x41\x05\xfa\x41\"\n googleads.googleapis.com/AdGroup\x12,\n\x02id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x03\xe0\x41\x03\x12*\n\x04name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12N\n\x06status\x18\x05 \x01(\x0e\x32>.google.ads.googleads.v4.enums.AdGroupStatusEnum.AdGroupStatus\x12M\n\x04type\x18\x0c \x01(\x0e\x32:.google.ads.googleads.v4.enums.AdGroupTypeEnum.AdGroupTypeB\x03\xe0\x41\x05\x12h\n\x10\x61\x64_rotation_mode\x18\x16 \x01(\x0e\x32N.google.ads.googleads.v4.enums.AdGroupAdRotationModeEnum.AdGroupAdRotationMode\x12]\n\rbase_ad_group\x18\x12 \x01(\x0b\x32\x1c.google.protobuf.StringValueB(\xe0\x41\x03\xfa\x41\"\n googleads.googleapis.com/AdGroup\x12;\n\x15tracking_url_template\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12N\n\x15url_custom_parameters\x18\x06 \x03(\x0b\x32/.google.ads.googleads.v4.common.CustomParameter\x12Y\n\x08\x63\x61mpaign\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValueB)\xe0\x41\x05\xfa\x41#\n!googleads.googleapis.com/Campaign\x12\x33\n\x0e\x63pc_bid_micros\x18\x0e \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\x0e\x63pm_bid_micros\x18\x0f \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x36\n\x11target_cpa_micros\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x0e\x63pv_bid_micros\x18\x11 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x03\xe0\x41\x03\x12\x36\n\x11target_cpm_micros\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x31\n\x0btarget_roas\x18\x1e \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x16percent_cpc_bid_micros\x18\x14 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x65\n\x1f\x65xplorer_auto_optimizer_setting\x18\x15 \x01(\x0b\x32<.google.ads.googleads.v4.common.ExplorerAutoOptimizerSetting\x12n\n\x1c\x64isplay_custom_bid_dimension\x18\x17 \x01(\x0e\x32H.google.ads.googleads.v4.enums.TargetingDimensionEnum.TargetingDimension\x12\x36\n\x10\x66inal_url_suffix\x18\x18 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12K\n\x11targeting_setting\x18\x19 \x01(\x0b\x32\x30.google.ads.googleads.v4.common.TargetingSetting\x12\x45\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_micros\x18\x1c \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x03\xe0\x41\x03\x12h\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_source\x18\x1d \x01(\x0e\x32>.google.ads.googleads.v4.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12@\n\x15\x65\x66\x66\x65\x63tive_target_roas\x18\x1f \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x03\xe0\x41\x03\x12i\n\x1c\x65\x66\x66\x65\x63tive_target_roas_source\x18 \x01(\x0e\x32>.google.ads.googleads.v4.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12[\n\x06labels\x18! 
\x03(\x0b\x32\x1c.google.protobuf.StringValueB-\xe0\x41\x03\xfa\x41\'\n%googleads.googleapis.com/AdGroupLabel:O\xea\x41L\n googleads.googleapis.com/AdGroup\x12(customers/{customer}/adGroups/{ad_group}B\xf9\x01\n%com.google.ads.googleads.v4.resourcesB\x0c\x41\x64GroupProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v4/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V4.Resources\xca\x02!Google\\Ads\\GoogleAds\\V4\\Resources\xea\x02%Google::Ads::GoogleAds::V4::Resourcesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_custom__parameter__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_explorer__auto__optimizer__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_targeting__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__ad__rotation__mode__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_bidding__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_targeting__dimension__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUP = _descriptor.Descriptor(
name='AdGroup',
full_name='google.ads.googleads.v4.resources.AdGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v4.resources.AdGroup.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\005\372A\"\n googleads.googleapis.com/AdGroup'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v4.resources.AdGroup.id', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v4.resources.AdGroup.name', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v4.resources.AdGroup.status', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v4.resources.AdGroup.type', index=4,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\005'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ad_rotation_mode', full_name='google.ads.googleads.v4.resources.AdGroup.ad_rotation_mode', index=5,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_ad_group', full_name='google.ads.googleads.v4.resources.AdGroup.base_ad_group', index=6,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003\372A\"\n googleads.googleapis.com/AdGroup'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tracking_url_template', full_name='google.ads.googleads.v4.resources.AdGroup.tracking_url_template', index=7,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url_custom_parameters', full_name='google.ads.googleads.v4.resources.AdGroup.url_custom_parameters', index=8,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='campaign', full_name='google.ads.googleads.v4.resources.AdGroup.campaign', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\005\372A#\n!googleads.googleapis.com/Campaign'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpc_bid_micros', full_name='google.ads.googleads.v4.resources.AdGroup.cpc_bid_micros', index=10,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpm_bid_micros', full_name='google.ads.googleads.v4.resources.AdGroup.cpm_bid_micros', index=11,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_cpa_micros', full_name='google.ads.googleads.v4.resources.AdGroup.target_cpa_micros', index=12,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpv_bid_micros', full_name='google.ads.googleads.v4.resources.AdGroup.cpv_bid_micros', index=13,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_cpm_micros', full_name='google.ads.googleads.v4.resources.AdGroup.target_cpm_micros', index=14,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_roas', full_name='google.ads.googleads.v4.resources.AdGroup.target_roas', index=15,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='percent_cpc_bid_micros', full_name='google.ads.googleads.v4.resources.AdGroup.percent_cpc_bid_micros', index=16,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='explorer_auto_optimizer_setting', full_name='google.ads.googleads.v4.resources.AdGroup.explorer_auto_optimizer_setting', index=17,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_custom_bid_dimension', full_name='google.ads.googleads.v4.resources.AdGroup.display_custom_bid_dimension', index=18,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='final_url_suffix', full_name='google.ads.googleads.v4.resources.AdGroup.final_url_suffix', index=19,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targeting_setting', full_name='google.ads.googleads.v4.resources.AdGroup.targeting_setting', index=20,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='effective_target_cpa_micros', full_name='google.ads.googleads.v4.resources.AdGroup.effective_target_cpa_micros', index=21,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='effective_target_cpa_source', full_name='google.ads.googleads.v4.resources.AdGroup.effective_target_cpa_source', index=22,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='effective_target_roas', full_name='google.ads.googleads.v4.resources.AdGroup.effective_target_roas', index=23,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='effective_target_roas_source', full_name='google.ads.googleads.v4.resources.AdGroup.effective_target_roas_source', index=24,
number=32, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='google.ads.googleads.v4.resources.AdGroup.labels', index=25,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003\372A\'\n%googleads.googleapis.com/AdGroupLabel'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352AL\n googleads.googleapis.com/AdGroup\022(customers/{customer}/adGroups/{ad_group}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=721,
serialized_end=2737,
)
_ADGROUP.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUP.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__status__pb2._ADGROUPSTATUSENUM_ADGROUPSTATUS
_ADGROUP.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__type__pb2._ADGROUPTYPEENUM_ADGROUPTYPE
_ADGROUP.fields_by_name['ad_rotation_mode'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_ad__group__ad__rotation__mode__pb2._ADGROUPADROTATIONMODEENUM_ADGROUPADROTATIONMODE
_ADGROUP.fields_by_name['base_ad_group'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUP.fields_by_name['tracking_url_template'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUP.fields_by_name['url_custom_parameters'].message_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_custom__parameter__pb2._CUSTOMPARAMETER
_ADGROUP.fields_by_name['campaign'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUP.fields_by_name['cpc_bid_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['cpm_bid_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['target_cpa_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['cpv_bid_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['target_cpm_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['target_roas'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_ADGROUP.fields_by_name['percent_cpc_bid_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['explorer_auto_optimizer_setting'].message_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_explorer__auto__optimizer__setting__pb2._EXPLORERAUTOOPTIMIZERSETTING
_ADGROUP.fields_by_name['display_custom_bid_dimension'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_targeting__dimension__pb2._TARGETINGDIMENSIONENUM_TARGETINGDIMENSION
_ADGROUP.fields_by_name['final_url_suffix'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUP.fields_by_name['targeting_setting'].message_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_common_dot_targeting__setting__pb2._TARGETINGSETTING
_ADGROUP.fields_by_name['effective_target_cpa_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ADGROUP.fields_by_name['effective_target_cpa_source'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUP.fields_by_name['effective_target_roas'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_ADGROUP.fields_by_name['effective_target_roas_source'].enum_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUP.fields_by_name['labels'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['AdGroup'] = _ADGROUP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroup = _reflection.GeneratedProtocolMessageType('AdGroup', (_message.Message,), dict(
DESCRIPTOR = _ADGROUP,
__module__ = 'google.ads.googleads_v4.proto.resources.ad_group_pb2'
,
__doc__ = """An ad group.
Attributes:
resource_name:
Immutable. The resource name of the ad group. Ad group
resource names have the form:
``customers/{customer_id}/adGroups/{ad_group_id}``
id:
Output only. The ID of the ad group.
name:
The name of the ad group. This field is required and should
not be empty when creating new ad groups. It must contain
fewer than 255 UTF-8 full-width characters. It must not
contain any null (code point 0x0), NL line feed (code point
0xA) or carriage return (code point 0xD) characters.
status:
The status of the ad group.
type:
Immutable. The type of the ad group.
ad_rotation_mode:
The ad rotation mode of the ad group.
base_ad_group:
Output only. For draft or experiment ad groups, this field is
the resource name of the base ad group from which this ad
group was created. If a draft or experiment ad group does not
have a base ad group, then this field is null. For base ad
groups, this field equals the ad group resource name. This
field is read-only.
tracking_url_template:
The URL template for constructing a tracking URL.
url_custom_parameters:
The list of mappings used to substitute custom parameter tags
in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
campaign:
Immutable. The campaign to which the ad group belongs.
cpc_bid_micros:
The maximum CPC (cost-per-click) bid.
cpm_bid_micros:
The maximum CPM (cost-per-thousand viewable impressions) bid.
target_cpa_micros:
The target CPA (cost-per-acquisition).
cpv_bid_micros:
Output only. The CPV (cost-per-view) bid.
target_cpm_micros:
Average amount in micros that the advertiser is willing to pay
for every thousand times the ad is shown.
target_roas:
The target ROAS (return-on-ad-spend) override. If the ad
group's campaign bidding strategy is a standard Target ROAS
strategy, then this field overrides the target ROAS specified
in the campaign's bidding strategy. Otherwise, this value is
ignored.
percent_cpc_bid_micros:
The percent cpc bid amount, expressed as a fraction of the
advertised price for some good or service. The valid range for
the fraction is [0,1) and the value stored here is 1,000,000
\* [fraction].
explorer_auto_optimizer_setting:
Settings for the Display Campaign Optimizer, initially termed
"Explorer".
display_custom_bid_dimension:
Allows advertisers to specify a targeting dimension on which
to place absolute bids. This is only applicable for campaigns
that target only the display network and not search.
final_url_suffix:
URL template for appending params to Final URL.
targeting_setting:
Setting for targeting related features.
effective_target_cpa_micros:
Output only. The effective target CPA (cost-per-acquisition).
This field is read-only.
effective_target_cpa_source:
Output only. Source of the effective target CPA. This field is
read-only.
effective_target_roas:
Output only. The effective target ROAS (return-on-ad-spend).
This field is read-only.
effective_target_roas_source:
Output only. Source of the effective target ROAS. This field
is read-only.
labels:
Output only. The resource names of labels attached to this ad
group.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.resources.AdGroup)
))
_sym_db.RegisterMessage(AdGroup)
DESCRIPTOR._options = None
_ADGROUP.fields_by_name['resource_name']._options = None
_ADGROUP.fields_by_name['id']._options = None
_ADGROUP.fields_by_name['type']._options = None
_ADGROUP.fields_by_name['base_ad_group']._options = None
_ADGROUP.fields_by_name['campaign']._options = None
_ADGROUP.fields_by_name['cpv_bid_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_source']._options = None
_ADGROUP.fields_by_name['effective_target_roas']._options = None
_ADGROUP.fields_by_name['effective_target_roas_source']._options = None
_ADGROUP.fields_by_name['labels']._options = None
_ADGROUP._options = None
# @@protoc_insertion_point(module_scope)
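# ---------------------------------------------------------------------------
# Illustrative note (not emitted by protoc): the AdGroup message registered
# above is used like any other protobuf message. Wrapper-typed fields such as
# ``cpc_bid_micros`` hold ``google.protobuf.Int64Value`` instances, so the
# numeric value lives under ``.value``. The identifiers below mirror this
# file's descriptors; the literal values are examples only.
#
#   from google.protobuf import wrappers_pb2
#   from google.ads.googleads_v4.proto.resources import ad_group_pb2
#
#   ad_group = ad_group_pb2.AdGroup(
#       resource_name='customers/1234567890/adGroups/987654321',
#       cpc_bid_micros=wrappers_pb2.Int64Value(value=1500000),
#   )
#   assert ad_group.cpc_bid_micros.value == 1500000
# ---------------------------------------------------------------------------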
| [
"[email protected]"
] | |
efc7ecd6a3329d95f29a04a55031b90530622262 | 52381a4fc02e90ce1fcfffd8d9876d9e8f44c248 | /core/storage/app_feedback_report/gae_models_test.py | 3d85809c3d1398a4612e0373018cc3bd404014b1 | [
"Apache-2.0"
] | permissive | ankita240796/oppia | 18aa1609a0f237ce76142b2a0d3169e830e5bcdd | ba4f072e494fd59df53fecc37e67cea7f9727234 | refs/heads/develop | 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 | Apache-2.0 | 2020-04-28T16:12:26 | 2018-12-06T06:02:18 | Python | UTF-8 | Python | false | false | 23,191 | py | # Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.app_feedback_report.gae_models."""
from __future__ import annotations
import datetime
import enum
import types
from core import feconf
from core import utils
from core.platform import models
from core.tests import test_utils
from mypy_imports import app_feedback_report_models, base_models # isort:skip
from typing import List, Any # isort:skip # pylint: disable=unused-import
(base_models, app_feedback_report_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.app_feedback_report])
class AppFeedbackReportModelTests(test_utils.GenericTestBase):
"""Tests for the AppFeedbackReportModel class."""
PLATFORM_ANDROID = 'android'
PLATFORM_WEB = 'web'
# Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC.
REPORT_SUBMITTED_TIMESTAMP_1 = datetime.datetime.fromtimestamp(1615151836)
REPORT_SUBMITTED_TIMESTAMP_1_MSEC = (
utils.get_time_in_millisecs(REPORT_SUBMITTED_TIMESTAMP_1))
# Timestamp in sec since epoch for Mar 12 2021 3:22:17 UTC.
REPORT_SUBMITTED_TIMESTAMP_2 = datetime.datetime.fromtimestamp(1615519337)
REPORT_SUBMITTED_TIMESTAMP_2_MSEC = (
utils.get_time_in_millisecs(REPORT_SUBMITTED_TIMESTAMP_2))
# Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC.
TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836)
TICKET_CREATION_TIMESTAMP_MSEC = (
utils.get_time_in_millisecs(TICKET_CREATION_TIMESTAMP))
TICKET_ID = '%s.%s.%s' % (
'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC),
'16CharString1234')
REPORT_TYPE_SUGGESTION = 'suggestion'
CATEGORY_OTHER = 'other'
PLATFORM_VERSION = '0.1-alpha-abcdef1234'
DEVICE_COUNTRY_LOCALE_CODE_INDIA = 'in'
ANDROID_DEVICE_MODEL = 'Pixel 4a'
ANDROID_SDK_VERSION = 28
ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer'
TEXT_LANGUAGE_CODE_ENGLISH = 'en'
AUDIO_LANGUAGE_CODE_ENGLISH = 'en'
ANDROID_REPORT_INFO = {
'user_feedback_other_text_input': 'add an admin',
'event_logs': ['event1', 'event2'],
'logcat_logs': ['logcat1', 'logcat2'],
'package_version_code': 1,
'language_locale_code': 'en',
'entry_point_info': {
'entry_point_name': 'crash',
},
'text_size': 'MEDIUM_TEXT_SIZE',
'only_allows_wifi_download_and_update': True,
'automatically_update_topics': False,
'is_curriculum_admin': False
}
WEB_REPORT_INFO = {
'user_feedback_other_text_input': 'add an admin'
}
ANDROID_REPORT_INFO_SCHEMA_VERSION = 1
WEB_REPORT_INFO_SCHEMA_VERSION = 1
def setUp(self) -> None:
"""Set up models in datastore for use in testing."""
super(AppFeedbackReportModelTests, self).setUp()
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) # type: ignore[no-untyped-call]
self.feedback_report_model = (
app_feedback_report_models.AppFeedbackReportModel(
id='%s.%s.%s' % (
self.PLATFORM_ANDROID,
int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'randomInteger123'),
platform=self.PLATFORM_ANDROID,
scrubbed_by=self.user_id,
ticket_id='%s.%s.%s' % (
'random_hash',
int(self.TICKET_CREATION_TIMESTAMP_MSEC),
'16CharString1234'),
submitted_on=self.REPORT_SUBMITTED_TIMESTAMP_1,
local_timezone_offset_hrs=0,
report_type=self.REPORT_TYPE_SUGGESTION,
category=self.CATEGORY_OTHER,
platform_version=self.PLATFORM_VERSION,
android_device_country_locale_code=(
self.DEVICE_COUNTRY_LOCALE_CODE_INDIA),
android_device_model=self.ANDROID_DEVICE_MODEL,
android_sdk_version=self.ANDROID_SDK_VERSION,
entry_point=self.ENTRY_POINT_NAVIGATION_DRAWER,
text_language_code=self.TEXT_LANGUAGE_CODE_ENGLISH,
audio_language_code=self.AUDIO_LANGUAGE_CODE_ENGLISH,
android_report_info=self.ANDROID_REPORT_INFO,
android_report_info_schema_version=(
self.ANDROID_REPORT_INFO_SCHEMA_VERSION)
)
)
self.feedback_report_model.update_timestamps()
self.feedback_report_model.put()
def test_create_and_get_android_report_model(self) -> None:
report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP_2))
app_feedback_report_models.AppFeedbackReportModel.create(
report_id, self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP_2,
0, self.REPORT_TYPE_SUGGESTION, self.CATEGORY_OTHER,
self.PLATFORM_VERSION, self.DEVICE_COUNTRY_LOCALE_CODE_INDIA,
self.ANDROID_SDK_VERSION, self.ANDROID_DEVICE_MODEL,
self.ENTRY_POINT_NAVIGATION_DRAWER, None, None, None, None,
self.TEXT_LANGUAGE_CODE_ENGLISH,
self.AUDIO_LANGUAGE_CODE_ENGLISH, self.ANDROID_REPORT_INFO,
None)
report_model = app_feedback_report_models.AppFeedbackReportModel.get(
report_id)
self.assertEqual(report_model.platform, self.PLATFORM_ANDROID)
self.assertEqual(
report_model.submitted_on, self.REPORT_SUBMITTED_TIMESTAMP_2)
self.assertEqual(report_model.report_type, self.REPORT_TYPE_SUGGESTION)
self.assertEqual(report_model.android_report_info_schema_version, 1)
self.assertEqual(report_model.web_report_info, None)
def test_create_and_get_web_report_model(self) -> None:
report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
self.PLATFORM_WEB, self.REPORT_SUBMITTED_TIMESTAMP_2))
app_feedback_report_models.AppFeedbackReportModel.create(
report_id, self.PLATFORM_WEB, self.REPORT_SUBMITTED_TIMESTAMP_2, 0,
self.REPORT_TYPE_SUGGESTION, self.CATEGORY_OTHER,
self.PLATFORM_VERSION, self.DEVICE_COUNTRY_LOCALE_CODE_INDIA,
self.ANDROID_SDK_VERSION, self.ANDROID_DEVICE_MODEL,
self.ENTRY_POINT_NAVIGATION_DRAWER, None, None, None, None,
self.TEXT_LANGUAGE_CODE_ENGLISH, self.AUDIO_LANGUAGE_CODE_ENGLISH,
None, self.WEB_REPORT_INFO)
report_model = app_feedback_report_models.AppFeedbackReportModel.get(
report_id)
self.assertEqual(report_model.platform, self.PLATFORM_WEB)
self.assertEqual(
report_model.submitted_on, self.REPORT_SUBMITTED_TIMESTAMP_2)
self.assertEqual(report_model.report_type, self.REPORT_TYPE_SUGGESTION)
self.assertEqual(report_model.web_report_info_schema_version, 1)
self.assertEqual(report_model.android_report_info, None)
def test_create_raises_exception_by_mocking_collision(self) -> None:
model_class = app_feedback_report_models.AppFeedbackReportModel
# Test Exception for AppFeedbackReportModel.
with self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception, 'The id generator for AppFeedbackReportModel is '
'producing too many collisions.'):
# Swap dependent method get_by_id to simulate collision every time.
with self.swap(
app_feedback_report_models.AppFeedbackReportModel,
'get_by_id', types.MethodType(
lambda x, y: True,
app_feedback_report_models.AppFeedbackReportModel)):
report_id = model_class.generate_id(
self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP_2)
model_class.create(
report_id, self.PLATFORM_ANDROID,
self.REPORT_SUBMITTED_TIMESTAMP_1, 0,
self.REPORT_TYPE_SUGGESTION, self.CATEGORY_OTHER,
self.PLATFORM_VERSION,
self.DEVICE_COUNTRY_LOCALE_CODE_INDIA,
self.ANDROID_SDK_VERSION, self.ANDROID_DEVICE_MODEL,
self.ENTRY_POINT_NAVIGATION_DRAWER, None, None, None, None,
self.TEXT_LANGUAGE_CODE_ENGLISH,
self.AUDIO_LANGUAGE_CODE_ENGLISH, self.ANDROID_REPORT_INFO,
None)
def test_get_deletion_policy(self) -> None:
model = app_feedback_report_models.AppFeedbackReportModel
self.assertEqual(
model.get_deletion_policy(),
base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)
def test_export_data_without_scrubber(self) -> None:
self.feedback_report_model.scrubbed_by = 'id'
self.feedback_report_model.update_timestamps()
self.feedback_report_model.put()
exported_data = (
app_feedback_report_models.AppFeedbackReportModel.export_data('id'))
report_id = '%s.%s.%s' % (
self.PLATFORM_ANDROID,
int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'randomInteger123')
expected_data = {
report_id: {
'scrubbed_by': None,
'platform': self.PLATFORM_ANDROID,
'ticket_id': self.TICKET_ID,
'submitted_on': utils.get_human_readable_time_string(
self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'local_timezone_offset_hrs': 0,
'report_type': self.REPORT_TYPE_SUGGESTION,
'category': self.CATEGORY_OTHER,
'platform_version': self.PLATFORM_VERSION
}
}
self.assertEqual(exported_data, expected_data)
def test_export_data_with_scrubber(self) -> None:
exported_data = (
app_feedback_report_models.AppFeedbackReportModel.export_data(
self.user_id))
report_id = '%s.%s.%s' % (
self.PLATFORM_ANDROID,
int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'randomInteger123')
expected_data = {
report_id: {
'scrubbed_by': self.NEW_USER_USERNAME,
'platform': self.PLATFORM_ANDROID,
'ticket_id': self.TICKET_ID,
'submitted_on': utils.get_human_readable_time_string(
self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'local_timezone_offset_hrs': 0,
'report_type': self.REPORT_TYPE_SUGGESTION,
'category': self.CATEGORY_OTHER,
'platform_version': self.PLATFORM_VERSION
}
}
self.assertEqual(exported_data, expected_data)
def test_get_all_unscrubbed_expiring_report_models(self) -> None:
expired_timestamp = datetime.datetime.utcnow() - (
feconf.APP_FEEDBACK_REPORT_MAXIMUM_LIFESPAN +
datetime.timedelta(days=10))
expired_model = app_feedback_report_models.AppFeedbackReportModel(
id='%s.%s.%s' % (
self.PLATFORM_ANDROID,
int(utils.get_time_in_millisecs(expired_timestamp)),
'randomInteger123'),
platform=self.PLATFORM_ANDROID,
scrubbed_by=None,
ticket_id='%s.%s.%s' % (
'random_hash',
int(self.TICKET_CREATION_TIMESTAMP_MSEC),
'16CharString1234'),
submitted_on=expired_timestamp,
local_timezone_offset_hrs=0,
report_type=self.REPORT_TYPE_SUGGESTION,
category=self.CATEGORY_OTHER,
platform_version=self.PLATFORM_VERSION,
android_device_country_locale_code=(
self.DEVICE_COUNTRY_LOCALE_CODE_INDIA),
android_device_model=self.ANDROID_DEVICE_MODEL,
android_sdk_version=self.ANDROID_SDK_VERSION,
entry_point=self.ENTRY_POINT_NAVIGATION_DRAWER,
text_language_code=self.TEXT_LANGUAGE_CODE_ENGLISH,
audio_language_code=self.AUDIO_LANGUAGE_CODE_ENGLISH,
android_report_info=self.ANDROID_REPORT_INFO,
android_report_info_schema_version=(
self.ANDROID_REPORT_INFO_SCHEMA_VERSION)
)
expired_model.created_on = expired_timestamp
expired_model.put()
model_class = app_feedback_report_models.AppFeedbackReportModel
model_entities = model_class.get_all_unscrubbed_expiring_report_models()
self.assertEqual(len(model_entities), 1)
self.assertEqual(model_entities[0].id, expired_model.id)
def test_get_lowest_supported_role(self) -> None:
model = app_feedback_report_models.AppFeedbackReportModel
self.assertEqual(
model.get_lowest_supported_role(), feconf.ROLE_ID_MODERATOR)
def test_has_reference_to_user_id(self) -> None:
model_class = app_feedback_report_models.AppFeedbackReportModel
# The only user references will be those who have scrubbed a report.
report_id = '%s.%s.%s' % (
self.PLATFORM_ANDROID,
int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC),
'randomInteger123')
model_entity = model_class.get(report_id)
model_entity.scrubbed_by = 'scrubber_user'
model_entity.update_timestamps()
model_entity.put()
self.assertTrue(model_class.has_reference_to_user_id('scrubber_user'))
self.assertFalse(model_class.has_reference_to_user_id('id_x'))
def test_get_filter_options_with_invalid_field_throws_exception(
self) -> None:
model_class = app_feedback_report_models.AppFeedbackReportModel
class InvalidFilter(enum.Enum):
"""Invalid filter."""
INVALID_FIELD = 'invalid_field'
with self.assertRaisesRegex( # type: ignore[no-untyped-call]
utils.InvalidInputException,
'The field %s is not a valid field to filter reports on' % (
InvalidFilter.INVALID_FIELD.name)
):
with self.swap(
model_class, 'query',
                self._mock_query_filters_returns_empty_list):
                # Using type ignore[arg-type] because we pass an arg of type
                # InvalidFilter where the filter_field_names type is expected.
                # This is done to ensure that InvalidInputException is thrown.
model_class.get_filter_options_for_field(
InvalidFilter.INVALID_FIELD) # type: ignore[arg-type]
    def _mock_query_filters_returns_empty_list(
self, projection: bool, distinct: bool) -> List[Any]: # pylint: disable=unused-argument
"""Mock the model query to test for an invalid filter field. Named
parameters 'projection' and 'distinct' are required to mock the
query function.
"""
return []
class AppFeedbackReportTicketModelTests(test_utils.GenericTestBase):
"""Tests for the AppFeedbackReportTicketModel class."""
# Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC.
REPORT_SUBMITTED_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836)
REPORT_SUBMITTED_TIMESTAMP_MSEC = utils.get_time_in_millisecs(
REPORT_SUBMITTED_TIMESTAMP)
# Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC.
NEWEST_REPORT_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836)
# Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC.
TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836)
TICKET_CREATION_TIMESTAMP_MSEC = utils.get_time_in_millisecs(
TICKET_CREATION_TIMESTAMP)
PLATFORM = 'android'
PLATFORM_VERSION = '0.1-alpha-abcdef1234'
TICKET_NAME = 'example ticket name'
TICKET_ID = '%s.%s.%s' % (
'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC),
'16CharString1234')
REPORT_IDS = ['%s.%s.%s' % (
PLATFORM, int(REPORT_SUBMITTED_TIMESTAMP_MSEC),
'randomInteger123')]
def test_create_and_get_ticket_model(self) -> None:
ticket_id = (
app_feedback_report_models.AppFeedbackReportTicketModel.generate_id(
self.TICKET_NAME))
app_feedback_report_models.AppFeedbackReportTicketModel.create(
entity_id=ticket_id, ticket_name=self.TICKET_NAME,
platform=self.PLATFORM, github_issue_repo_name=None,
github_issue_number=None,
newest_report_timestamp=self.NEWEST_REPORT_TIMESTAMP,
report_ids=self.REPORT_IDS)
ticket_model = (
app_feedback_report_models.AppFeedbackReportTicketModel.get(
ticket_id))
self.assertEqual(ticket_model.id, ticket_id)
self.assertEqual(ticket_model.platform, self.PLATFORM)
self.assertEqual(
ticket_model.newest_report_timestamp, self.NEWEST_REPORT_TIMESTAMP)
self.assertEqual(ticket_model.ticket_name, self.TICKET_NAME)
self.assertEqual(ticket_model.report_ids, self.REPORT_IDS)
def test_create_raises_exception_by_mocking_collision(self) -> None:
model_class = app_feedback_report_models.AppFeedbackReportTicketModel
# Test Exception for AppFeedbackReportTicketModel.
with self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception,
            'The id generator for AppFeedbackReportTicketModel is producing '
            'too many collisions.'
):
# Swap dependent method get_by_id to simulate collision every time.
with self.swap(model_class, 'get_by_id', types.MethodType(
lambda x, y: True, model_class)):
ticket_id = model_class.generate_id(self.TICKET_NAME)
model_class.create(
entity_id=ticket_id, ticket_name=self.TICKET_NAME,
platform=self.PLATFORM, github_issue_repo_name=None,
github_issue_number=None,
newest_report_timestamp=self.NEWEST_REPORT_TIMESTAMP,
report_ids=self.REPORT_IDS)
def test_get_deletion_policy(self) -> None:
model = app_feedback_report_models.AppFeedbackReportTicketModel()
self.assertEqual(
model.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_get_lowest_supported_role(self) -> None:
model = app_feedback_report_models.AppFeedbackReportTicketModel
self.assertEqual(
model.get_lowest_supported_role(), feconf.ROLE_ID_MODERATOR)
class AppFeedbackReportStatsModelTests(test_utils.GenericTestBase):
"""Tests for the AppFeedbackReportStatsModel class."""
# Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC.
TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836)
TICKET_CREATION_TIMESTAMP_MSEC = (
utils.get_time_in_millisecs(TICKET_CREATION_TIMESTAMP))
TICKET_ID = '%s.%s.%s' % (
'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC),
'16CharString1234')
# Timestamp date in sec since epoch for Mar 19 2021 UTC.
STATS_DATE = datetime.date.fromtimestamp(1616173836)
DAILY_STATS = {
'report_type': {
'suggestion': 1, 'issue': 1, 'crash': 1}}
TOTAL_REPORTS_SUBMITTED = 3
def test_create_and_get_stats_model(self) -> None:
entity_id = (
app_feedback_report_models.AppFeedbackReportStatsModel.calculate_id(
'android', self.TICKET_ID, self.STATS_DATE))
app_feedback_report_models.AppFeedbackReportStatsModel.create(
entity_id=entity_id,
platform='android',
ticket_id=self.TICKET_ID,
stats_tracking_date=self.STATS_DATE,
total_reports_submitted=self.TOTAL_REPORTS_SUBMITTED,
daily_param_stats=self.DAILY_STATS)
stats_model = (
app_feedback_report_models.AppFeedbackReportStatsModel.get_by_id(
entity_id))
# Ruling out the possibility of None for mypy type checking.
assert stats_model is not None
self.assertEqual(stats_model.id, '%s:%s:%s' % (
'android', self.TICKET_ID, self.STATS_DATE.isoformat()))
self.assertEqual(stats_model.platform, 'android')
self.assertEqual(
stats_model.stats_tracking_date, self.STATS_DATE)
self.assertEqual(
stats_model.total_reports_submitted, self.TOTAL_REPORTS_SUBMITTED)
self.assertEqual(stats_model.daily_param_stats, self.DAILY_STATS)
def test_get_id_on_same_ticket_produces_same_id(self) -> None:
model_class = (
app_feedback_report_models.AppFeedbackReportStatsModel)
entity_id = model_class.calculate_id(
'android', self.TICKET_ID, self.STATS_DATE)
entity_id_copy = model_class.calculate_id(
'android', self.TICKET_ID, self.STATS_DATE)
self.assertEqual(entity_id, entity_id_copy)
def test_get_stats_for_ticket(self) -> None:
entity_id = (
app_feedback_report_models.AppFeedbackReportStatsModel.calculate_id(
'android', self.TICKET_ID, self.STATS_DATE))
app_feedback_report_models.AppFeedbackReportStatsModel.create(
entity_id=entity_id,
platform='android',
ticket_id=self.TICKET_ID,
total_reports_submitted=self.TOTAL_REPORTS_SUBMITTED,
stats_tracking_date=self.STATS_DATE,
daily_param_stats=self.DAILY_STATS)
expected_stats_model = (
app_feedback_report_models.AppFeedbackReportStatsModel.get_by_id(
entity_id))
stats_model_class = (
app_feedback_report_models.AppFeedbackReportStatsModel)
stats_models = (
stats_model_class.get_stats_for_ticket(self.TICKET_ID))
self.assertEqual(len(stats_models), 1)
self.assertEqual(stats_models[0].id, entity_id)
self.assertEqual(stats_models[0], expected_stats_model)
def test_get_deletion_policy(self) -> None:
model = app_feedback_report_models.AppFeedbackReportStatsModel()
self.assertEqual(
model.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_get_lowest_supported_role(self) -> None:
model = app_feedback_report_models.AppFeedbackReportStatsModel
self.assertEqual(
model.get_lowest_supported_role(),
feconf.ROLE_ID_MODERATOR)
| [
"[email protected]"
] | |
b59a65aaf741f7146a41fec472f14aee29ef80fa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02627/s326133719.py | ea89387977f0185b18128a2c9ae93915afa28952 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from string import ascii_lowercase
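# Print "a" if the input character is a lowercase letter, otherwise print "A".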
a = input()
if a in ascii_lowercase:
print("a")
else:
print("A") | [
"[email protected]"
] | |
c469f6d0359884d8d16ed851a6af1e7f39b15f42 | 6f04b7f8fd55fffb54ce4c78049812655b8c176b | /chap03_GroupApply/lecture/step02_groupby_plot_선생님.py | a33aaf98830f6e875d4ec23df91b81e5c56e0c20 | [] | no_license | Elly-bang/Python-ll | 71092507b719e1532675f8bab489be3f7366c1de | 2658de214cc4a9dd68ad35d82202b59b3129e5af | refs/heads/master | 2022-11-09T18:11:55.449732 | 2020-06-30T06:57:11 | 2020-06-30T06:57:11 | 276,021,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,962 | py | # -*- coding: utf-8 -*-
"""
Data analysis based on grouping variables
 - create subsets
 - create group objects
 - visualization
"""
import pandas as pd
# 1. dataset load
wine = pd.read_csv('C:/ITWILL/4_Python-II/data/winequality-both.csv')
wine.info() # type, quality
# Rename columns : replace spaces with '_'
wine.columns = wine.columns.str.replace(' ', '_')
wine.info()
# RangeIndex: 6497 entries, 0 to 6496
# Data columns (total 13 columns)
# Check the grouping variables
wine['type'].unique() # ['red', 'white']
wine.quality.unique() # [5, 6, 7, 4, 8, 3, 9]
# 2. Create subsets
# 1) type column : DataFrame (2-D)
red_wine = wine.loc[wine['type']=='red'] #[row, col]
red_wine.info()
# Int64Index: 1599 entries, 0 to 1598
# Data columns (total 13 columns):
red_wine.shape # (1599, 13)
# 2) type (rows) vs quality (column) : Series (1-D)
red_quality = wine.loc[wine['type']=='red', 'quality'] # [row, col]
type(red_quality) # pandas.core.series.Series
red_quality.shape # (1599,)
white_quality = wine.loc[wine['type']=='white', 'quality'] # [row, col]
type(white_quality) # pandas.core.series.Series
white_quality.shape # (4898,)
# 3. Create a group object : 2 grouping variables -> the other 11 variables are grouped
# Syntax) DF.groupby(['column1', 'column2'])
wine_grp = wine.groupby(['type', 'quality'])
# Frequency count of each group
wine_grp.size()
'''
type quality
red 3 10
4 53
5 681
6 638
7 199
8 18
white 3 20
4 163
'''
# 1d -> 2d : cross-tabulation table
grp_2d = wine_grp.size().unstack()
grp_2d
'''
quality 3 4 5 6 7 8 9
type
red 10.0 53.0 681.0 638.0 199.0 18.0 NaN
white 20.0 163.0 1457.0 2198.0 880.0 175.0 5.0
'''
# Cross-tabulation table
tab = pd.crosstab(wine['type'], wine['quality']) # (index=rows, columns=columns)
tab
'''
quality 3 4 5 6 7 8 9
type
red 10 53 681 638 199 18 0
white 20 163 1457 2198 880 175 5
'''
# 4. Visualize the group object
import matplotlib.pyplot as plt
type(grp_2d) # pandas.core.frame.DataFrame
# Stacked horizontal bar chart
grp_2d.plot(kind='barh',
title='type vs quality',
stacked=True)
plt.show()
# 5. Wine type (grouping variable) vs alcohol (continuous variable) : statistics
wine_grp = wine.groupby('type') # 1 grouping variable -> the other 12 variables are grouped
# Summary statistics of alcohol for each group
wine_grp['alcohol'].describe()
'''
count mean std min 25% 50% 75% max
type
red 1599.0 10.422983 1.065668 8.4 9.5 10.2 11.1 14.9
white 4898.0 10.514267 1.230621 8.0 9.5 10.4 11.4 14.2
'''
| [
"[email protected]"
] | |
03ae5a477c8f067d8cb700f67401521690fd068d | eda9187adfd53c03f55207ad05d09d2d118baa4f | /algo/pca/pca_old.py | 264da244de8c2b727e5ca60a969c58a436681e39 | [] | no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
@Author: Yan Errol @Email:[email protected]
@Date: 2019-06-09 23:59
@Describe:
@Evn:
'''
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
# A value we picked to always display the same results
# Feel free to change this to any value greater than 0 view different random value outcomes
seed = 9000
# We're using a seeded random state so we always get the same outcome
seeded_state = np.random.RandomState(seed=seed)
# Returns a random 150 points (x, y pairs) in a gaussian distribution,
# i.e. most of the points fall close to the average, with a few outliers
rand_points = seeded_state.randn(150, 2)
# The @ operator performs matrix multiplication, and serves to bring
# our gaussian distribution points closer together
points = rand_points @ seeded_state.rand(2, 2)
x = points[:, 0]
y = points[:, 1]
# Now we have a sample dataset of 150 points to perform PCA on, so
# go ahead and display this in a plot.
plt.scatter(x, y, alpha=0.5)
plt.title("Sample Dataset")
print("Plotting our created dataset...\n")
print("Points:")
for p in points[:10, :]:
print("({:7.4f}, {:7.4f})".format(p[0], p[1]))
print("...\n")
plt.show()
# Find two principal components from our given dataset
pca = PCA(n_components = 2)
pca.fit(points)
# Once we are fitted, we have access to inner mean_, components_, and explained_variance_ variables
# Use these to add some arrows to our plot
plt.scatter(x, y, alpha=0.5)
plt.title("Sample Dataset with Principal Component Lines")
for var, component in zip(pca.explained_variance_, pca.components_):
plt.annotate(
"",
component * np.sqrt(var) * 2 + pca.mean_,
pca.mean_
)
print("Plotting our calculated principal components...\n")
plt.show()
# Reduce the dimensionality of our data using a PCA transformation
pca = PCA(n_components = 1)
transformed_points = pca.fit_transform(points)
# Note that all the inverse transformation does is transform the data back to its original space.
# In practice, this is unnecessary. For this example, all data would be along the x axis.
# We use it here for visualization purposes
inverse = pca.inverse_transform(transformed_points)
t_x = inverse[:, 0]
t_y = inverse[:, 1]
# Plot the original and transformed data sets
plt.scatter(x, y, alpha=0.3)
plt.scatter(t_x, t_y, alpha=0.7)
plt.title("Sample Dataset (Blue) and Transformed Dataset (Orange)")
print("Plotting our dataset with a dimensionality reduction...")
plt.show()
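# Optional check (not part of the original script): report how much of the
# total variance the single retained component explains, using the standard
# `explained_variance_ratio_` attribute of the fitted sklearn PCA object.
print("Variance explained by the retained component: {:.2%}".format(
    pca.explained_variance_ratio_[0]))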
| [
"[email protected]"
] | |
c3029c19a6c3b697bb29649019096a2ef9384915 | 521efcd158f4c69a686ed1c63dd8e4b0b68cc011 | /airflow/operators/datetime.py | 47021c1730952719ea17c0bf05c4778c8d57ae5f | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | coutureai/RaWorkflowOrchestrator | 33fd8e253bfea2f9a82bb122ca79e8cf9dffb003 | cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f | refs/heads/main | 2022-10-01T06:24:18.560652 | 2021-12-29T04:52:56 | 2021-12-29T04:52:56 | 184,547,783 | 5 | 12 | Apache-2.0 | 2022-11-04T00:02:55 | 2019-05-02T08:38:38 | Python | UTF-8 | Python | false | false | 4,632 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Iterable, Union
from airflow.exceptions import AirflowException
from airflow.operators.branch import BaseBranchOperator
from airflow.utils import timezone
from airflow.utils.context import Context
class BranchDateTimeOperator(BaseBranchOperator):
"""
Branches into one of two lists of tasks depending on the current datetime.
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BranchDateTimeOperator`
True branch will be returned when ``datetime.datetime.now()`` falls below
``target_upper`` and above ``target_lower``.
:param follow_task_ids_if_true: task id or task ids to follow if
``datetime.datetime.now()`` falls above target_lower and below ``target_upper``.
:type follow_task_ids_if_true: str or list[str]
:param follow_task_ids_if_false: task id or task ids to follow if
``datetime.datetime.now()`` falls below target_lower or above ``target_upper``.
:type follow_task_ids_if_false: str or list[str]
:param target_lower: target lower bound.
:type target_lower: Optional[datetime.datetime]
:param target_upper: target upper bound.
:type target_upper: Optional[datetime.datetime]
:param use_task_execution_date: If ``True``, uses task's execution day to compare with targets.
Execution date is useful for backfilling. If ``False``, uses system's date.
:type use_task_execution_date: bool
"""
def __init__(
self,
*,
follow_task_ids_if_true: Union[str, Iterable[str]],
follow_task_ids_if_false: Union[str, Iterable[str]],
target_lower: Union[datetime.datetime, datetime.time, None],
target_upper: Union[datetime.datetime, datetime.time, None],
use_task_execution_date: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
if target_lower is None and target_upper is None:
raise AirflowException(
"Both target_upper and target_lower are None. At least one "
"must be defined to be compared to the current datetime"
)
self.target_lower = target_lower
self.target_upper = target_upper
self.follow_task_ids_if_true = follow_task_ids_if_true
self.follow_task_ids_if_false = follow_task_ids_if_false
self.use_task_execution_date = use_task_execution_date
def choose_branch(self, context: Context) -> Union[str, Iterable[str]]:
if self.use_task_execution_date is True:
now = timezone.make_naive(context["logical_date"], self.dag.timezone)
else:
now = timezone.make_naive(timezone.utcnow(), self.dag.timezone)
lower, upper = target_times_as_dates(now, self.target_lower, self.target_upper)
if upper is not None and upper < now:
return self.follow_task_ids_if_false
if lower is not None and lower > now:
return self.follow_task_ids_if_false
return self.follow_task_ids_if_true
def target_times_as_dates(
base_date: datetime.datetime,
lower: Union[datetime.datetime, datetime.time, None],
upper: Union[datetime.datetime, datetime.time, None],
):
"""Ensures upper and lower time targets are datetimes by combining them with base_date"""
if isinstance(lower, datetime.datetime) and isinstance(upper, datetime.datetime):
return lower, upper
if lower is not None and isinstance(lower, datetime.time):
lower = datetime.datetime.combine(base_date, lower)
if upper is not None and isinstance(upper, datetime.time):
upper = datetime.datetime.combine(base_date, upper)
if lower is None or upper is None:
return lower, upper
if upper < lower:
upper += datetime.timedelta(days=1)
return lower, upper
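# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module). Inside a DAG
# one might branch on the time of day; the task ids below are placeholders
# invented for this example.
#
#   branch = BranchDateTimeOperator(
#       task_id="is_business_hours",
#       follow_task_ids_if_true="daytime_processing",
#       follow_task_ids_if_false="after_hours_processing",
#       target_lower=datetime.time(9, 0, 0),
#       target_upper=datetime.time(17, 0, 0),
#   )
#
# When only ``datetime.time`` bounds are given, ``target_times_as_dates``
# combines them with the current date and pushes the upper bound forward one
# day if it falls before the lower bound (an overnight window).
# ---------------------------------------------------------------------------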
| [
"[email protected]"
] | |
734b6a332d6f0af9cd41c64282aff3d00bb8662f | 461bffdd97ba507b29f1fbf6f9af1800f0e241f6 | /pytext/metric_reporters/classification_metric_reporter.py | 1e3ced78d285a9bff886349e7b06f32ac39129b1 | [
"BSD-3-Clause"
] | permissive | Erica-Liu/pytext | d347e1327254bbe746c491fd8002bcc2e29d82a9 | 0a77e34e555750311ede54514c3c85b133b258f3 | refs/heads/master | 2020-06-16T02:49:21.589774 | 2019-07-05T18:25:52 | 2019-07-05T18:33:55 | 195,459,270 | 0 | 0 | NOASSERTION | 2019-07-05T19:38:34 | 2019-07-05T19:38:34 | null | UTF-8 | Python | false | false | 6,254 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from enum import Enum
from typing import List, Optional
from pytext.common.constants import Stage
from pytext.data import CommonMetadata
from pytext.metrics import (
LabelListPrediction,
LabelPrediction,
compute_classification_metrics,
compute_multi_label_classification_metrics,
)
from .channel import Channel, ConsoleChannel, FileChannel
from .metric_reporter import MetricReporter
META_LABEL_NAMES = "label_names"
class IntentModelChannel(FileChannel):
def get_title(self):
return ("predicted", "actual", "scores_str", "text")
def gen_content(self, metrics, loss, preds, targets, scores, contexts):
for i in range(len(preds)):
yield [
preds[i],
targets[i],
",".join([f"{s:.2f}" for s in scores[i]]),
contexts["utterance"][i],
]
class ComparableClassificationMetric(Enum):
ACCURACY = "accuracy"
ROC_AUC = "roc_auc"
MCC = "mcc"
MACRO_F1 = "macro_f1"
LABEL_F1 = "label_f1"
LABEL_AVG_PRECISION = "label_avg_precision"
LABEL_ROC_AUC = "label_roc_auc"
# use negative because the reporter's lower_is_better value is False
NEGATIVE_LOSS = "negative_loss"
class ClassificationMetricReporter(MetricReporter):
__EXPANSIBLE__ = True
class Config(MetricReporter.Config):
model_select_metric: ComparableClassificationMetric = (
ComparableClassificationMetric.ACCURACY
)
target_label: Optional[str] = None
#: These column names correspond to raw input data columns. Text in these
#: columns (usually just 1 column) will be concatenated and output in
#: the IntentModelChannel as an evaluation tsv.
text_column_names: List[str] = ["text"]
def __init__(
self,
label_names: List[str],
channels: List[Channel],
model_select_metric: ComparableClassificationMetric = (
ComparableClassificationMetric.ACCURACY
),
target_label: Optional[str] = None,
text_column_names: List[str] = Config.text_column_names,
) -> None:
super().__init__(channels)
self.label_names = label_names
self.model_select_metric = model_select_metric
self.target_label = target_label
self.text_column_names = text_column_names
@classmethod
def from_config(cls, config, meta: CommonMetadata = None, tensorizers=None):
# TODO: refactor metric reporting and remove this hack
if tensorizers:
labels = list(tensorizers["labels"].vocab)
else:
labels = meta.target.vocab.itos
return cls.from_config_and_label_names(config, labels)
@classmethod
def from_config_and_label_names(cls, config, label_names: List[str]):
if config.model_select_metric in (
ComparableClassificationMetric.LABEL_F1,
ComparableClassificationMetric.LABEL_AVG_PRECISION,
ComparableClassificationMetric.LABEL_ROC_AUC,
):
assert config.target_label is not None
assert config.target_label in label_names
if config.model_select_metric in (
ComparableClassificationMetric.ROC_AUC,
ComparableClassificationMetric.MCC,
):
assert len(label_names) == 2
return cls(
label_names,
[ConsoleChannel(), IntentModelChannel((Stage.TEST,), config.output_path)],
config.model_select_metric,
config.target_label,
config.text_column_names,
)
def batch_context(self, raw_batch, batch):
context = super().batch_context(raw_batch, batch)
context["utterance"] = [
" | ".join(str(row[column_name]) for column_name in self.text_column_names)
for row in raw_batch
]
return context
def calculate_metric(self):
return compute_classification_metrics(
[
LabelPrediction(scores, pred, expect)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
],
self.label_names,
self.calculate_loss(),
)
def get_meta(self):
return {META_LABEL_NAMES: self.label_names}
def get_model_select_metric(self, metrics):
if self.model_select_metric == ComparableClassificationMetric.ACCURACY:
metric = metrics.accuracy
elif self.model_select_metric == ComparableClassificationMetric.ROC_AUC:
metric = metrics.roc_auc
elif self.model_select_metric == ComparableClassificationMetric.MCC:
metric = metrics.mcc
elif self.model_select_metric == ComparableClassificationMetric.MACRO_F1:
metric = metrics.macro_prf1_metrics.macro_scores.f1
elif self.model_select_metric == ComparableClassificationMetric.LABEL_F1:
metric = metrics.macro_prf1_metrics.per_label_scores[self.target_label].f1
elif (
self.model_select_metric
== ComparableClassificationMetric.LABEL_AVG_PRECISION
):
metric = metrics.per_label_soft_scores[self.target_label].average_precision
elif self.model_select_metric == ComparableClassificationMetric.LABEL_ROC_AUC:
metric = metrics.per_label_soft_scores[self.target_label].roc_auc
elif self.model_select_metric == ComparableClassificationMetric.NEGATIVE_LOSS:
metric = -metrics.loss
else:
raise ValueError(f"unknown metric: {self.model_select_metric}")
assert metric is not None
return metric
class MultiLabelClassificationMetricReporter(ClassificationMetricReporter):
def calculate_metric(self):
return compute_multi_label_classification_metrics(
[
LabelListPrediction(scores, pred, expect)
for scores, pred, expect in zip(
self.all_scores, self.all_preds, self.all_targets
)
],
self.label_names,
self.calculate_loss(),
)
| [
"[email protected]"
] | |
8f6c010d69c13e262cdd609efe3ac4b6009f38d3 | 6dae31f10260e39feae9d268e3ebe6d23146575a | /spm/bin_deep_surveys/run_stellarpop_miles_deep2_kroupa | fc11bb5287636c2c426dae12945d749d5984c5b1 | [
"CC0-1.0"
] | permissive | JohanComparat/pySU | e55eba92f0660e733468bce618595a03dc25a3d2 | 4169e11414be661dc0c01c774e64fb8ce6242825 | refs/heads/master | 2021-12-25T11:06:04.315554 | 2021-10-11T12:03:22 | 2021-10-11T12:03:22 | 44,340,565 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | #! /usr/bin/env python
import sys
from os.path import join
import os
import time
import numpy as np
import glob
import astropy.cosmology as co
cosmo = co.Planck13
import astropy.io.fits as fits
# for one galaxy spectrum
import GalaxySpectrumFIREFLY as gs
import StellarPopulationModel as spm
catalog=fits.open(join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.fits"))[1].data
outputFolder = join( os.environ['DEEP2_DIR'], 'stellarpop-m11-kroupa-miles', 'stellarpop')
def runSpec(catalog_entry):
print catalog_entry['ZBEST'], catalog_entry['RA'], catalog_entry['DEC']
t0=time.time()
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
path_to_spectrum = glob.glob(join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))
if len(path_to_spectrum)>=1:
try:
spec=gs.GalaxySpectrumFIREFLY("-", milky_way_reddening=True)
spec.openObservedDEEP2pectrum(catalog_entry)
ageMax = np.log10(cosmo.age(spec.redshift).value*1e9)
if spec.redshift>0.01 and spec.redshift < 1.7 :
model = spm.StellarPopulationModel(spec, join(outputFolder , 'spFly-deep2-'+mask+'-'+objno ), cosmo, models = 'm11', model_libs = ['MILES'], imfs = ['kr'], age_limits = [6,10], downgrade_models = True, data_wave_medium = 'air', Z_limits = [-3.,1.],suffix="-kr.fits", use_downgraded_models = True)
try :
model.fit_models_to_data()
#print( model.averages )
except (ValueError):
pass
print "time used =", time.time()-t0 ,"seconds"
except (IndexError):
pass
for catalog_entry in catalog[::-1]:
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
if os.path.isfile(join(outputFolder , 'spFly-deep2-'+mask+'-'+objno +"-kr.fits")):
print "pass", join(outputFolder , 'spFly-deep2-'+mask+'-'+objno +"-kr.fits")
else:
runSpec(catalog_entry)
sys.exit()
n_fc_tc = np.zeros_like(catalog['ZBEST'])
for ii, catalog_entry in enumerate(catalog):
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
path_to_spectrum = glob.glob(join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))
n_fc_tc[ii] = len(path_to_spectrum )
ok=(catalog['ZBEST']>0.01)&(catalog['ZBEST']<1.7)&(n_fc_tc>=1)
print len(catalog), len(catalog[ok]) | [
"[email protected]"
] | ||
ff6f46df45a62d02b5d3eb10ff5fa6488d3aca62 | ea01ed735850bf61101b869b1df618d3c09c2aa3 | /python基础/network_programming/ftp_task/ftp/conf/settings.py | fe1097cb50a4b2bf3c4804ce40907ffed75bb71a | [] | no_license | liuzhipeng17/python-common | 867c49ac08719fabda371765d1f9e42f6dd289b9 | fb44da203d4e3a8304d9fe6205e60c71d3a620d8 | refs/heads/master | 2021-09-27T10:39:45.178135 | 2018-11-08T01:49:33 | 2018-11-08T01:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # -*- coding: utf-8 -*-
import os.path
_project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
USER_DB_PATH = os.path.join(_project_path, 'db', 'userdb.ini')
ENCODING = 'utf-8'
MAX_BUFFER_SIZE = 1024
USER_BASE_PATH = os.path.join(_project_path, 'dir', 'home')
BASE_DIR = os.path.join(_project_path, 'dir')
USER_DOWNLOAD_BASE_DIR = os.path.join(_project_path, 'dir', 'downloads')
USER_UPLOAD_BASE_DIR = os.path.join(_project_path, 'dir', 'uploads')
STATUS_CODE = {
200 : "Task finished",
250 : "Invalid cmd format, e.g: {'action':'get','filename':'tests.py','size':344}",
251 : "Invalid cmd ",
252 : "Invalid auth data",
253 : "Wrong username or password",
254 : "Passed authentication",
255 : "Filename doesn't provided",
256 : "File doesn't exist on server",
257 : "ready to send file",
258 : "md5 verification",
259 : "path doesn't exist on server",
260 : "path changed",
261 : "send File line",
262 : "File has exist on server",
263 : "Put empty file",
264 : "Put not null file",
265 : "Get empty file",
266 : "Path access permitted or Path not exist",
267 : "pwd invalid cmd arguments",
268 : "pwd pass",
269 : "permitted putting same-name file unless continue situation"
}
| [
"[email protected]"
] | |
e1550eadd9cc69970c6b6044d39bd284e1baef25 | 474525154a4e1d48ef5242d1f44164d05399b145 | /spinoffs/oryx/oryx/experimental/nn/function.py | b9d20e453f86199f85885faeeef667bb5300a2ac | [
"Apache-2.0"
] | permissive | svshivapuja/probability | 9855737790f74a39169688fbfec9671deef804d9 | af7ccb22d972329633530c3b754ed1f49472f6a7 | refs/heads/main | 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 | Apache-2.0 | 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null | UTF-8 | Python | false | false | 1,863 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Registers custom rules for neural networks in the stateful function API.
The Oryx state API enables having custom unzip rules when `init`-ing a
function. We use this for neural networks to thread kwargs through the Jaxpr
that is created when unzipping a function. This module implements this by first
replacing instances of `layer_cau` with `FlatPrimitive`s, which avoids using a
call primitive that would be difficult to pass new keyword arguments into. We
can more easily override the behavior of a regular primitive.
"""
from jax import tree_util
from oryx.core import state
from oryx.experimental.nn import base
__all__ = [
]
def layer_cau_kwargs_rule(*flat_args, num_consts, in_tree, kwargs, **_):
"""Custom kwargs rule for layer_cau primitive."""
flat_args = flat_args[num_consts:]
layer, *args = tree_util.tree_unflatten(in_tree, flat_args)
kwargs = dict(kwargs)
has_rng = kwargs.pop('has_rng', False)
if has_rng:
rng, args = args[0], args[1:]
kwargs = dict(kwargs, rng=rng)
ans = layer.call_and_update(*args, **kwargs)
return tree_util.tree_leaves(ans)
state.kwargs_rules[base.layer_cau_p] = layer_cau_kwargs_rule
| [
"[email protected]"
] | |
b55ecc78784e9edeb59f797fac7f6750b1ccd7e5 | 79c0358277a5f6ae231d89ee4476cb1facd00e50 | /extra/desktop/gnome/addons/gnome-color-manager/actions.py | c37eddcb3525032b734864d2c7b456d5bc8496bb | [] | no_license | mrust1/PisiLinux | a139dbc9f8d3d61ebec38d08f36dfa6eafff7107 | a2014b6912df50ad22da5b2f3d21bf01cbd8e192 | refs/heads/master | 2020-12-11T03:42:50.309869 | 2014-10-05T14:05:17 | 2014-10-05T14:05:17 | 24,826,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.configure("--libexecdir=/usr/lib/gnome-color-manager")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "data/icons/48x48/gnome-color-manager.png")
pisitools.dodoc("AUTHORS", "COPYING", "ChangeLog", "NEWS", "README")
| [
"[email protected]"
] | |
9d6a7d467f536f81f74bd7a97d1b3f132f5b0116 | 46ff8840ec7b3949c7f9c8d3262252b76761df3a | /fil_finder/filament.py | 6c8828ef6322203f9914544ddb6cd32e86aefcb6 | [
"MIT"
] | permissive | samdf96/FilFinder | cc087fbc78ff8db85dae4b8200d41a607dae00be | 340497399b782e855840e48b6a00f979babfad38 | refs/heads/master | 2023-09-04T11:56:20.672554 | 2019-10-25T22:11:38 | 2019-10-25T22:11:38 | 248,880,381 | 0 | 1 | MIT | 2020-03-21T00:45:53 | 2020-03-21T00:45:52 | null | UTF-8 | Python | false | false | 55,271 | py | # Licensed under an MIT open source license - see LICENSE
import numpy as np
import astropy.units as u
import networkx as nx
import warnings
import scipy.ndimage as nd
from astropy.nddata import extract_array
import astropy.modeling as mod
from astropy.modeling.models import Gaussian1D, Const1D
import sys
if sys.version_info[0] >= 3:
import _pickle as pickle
else:
import cPickle as pickle
from .length import (init_lengths, main_length, make_final_skeletons,
pre_graph, longest_path, prune_graph)
from .pixel_ident import pix_identify
from .utilities import pad_image, in_ipynb, red_chisq
from .base_conversions import UnitConverter
from .rollinghough import rht
from .width import (radial_profile, gaussian_model, fit_radial_model,
nonparam_width)
class FilamentNDBase(object):
"""
Analysis and properties of a single filament object.
"""
@property
def pixel_coords(self):
return self._pixel_coords
@property
def pixel_extents(self):
return [tuple([coord.min() for coord in self._orig_pixel_coords]),
tuple([coord.max() for coord in self._orig_pixel_coords])]
def position(self, world_coord=False):
'''
Return the centre position of the filament based on the pixel
coordinates.
'''
centres = [np.median(coord) for coord in self._orig_pixel_coords]
if world_coord:
if hasattr(self._converter, '_wcs'):
wcs = self._converter._wcs
# Convert to world coordinates
posn_tuple = centres + [0]
w_centres = wcs.all_pix2world(*posn_tuple)
# Attach units
wu_centres = [val * u.Unit(wcs.wcs.cunit[i]) for i, val
in enumerate(w_centres)]
return wu_centres
else:
warnings.warn("No WCS information given. Returning pixel"
" position.")
return [centre * u.pix for centre in centres]
else:
return [centre * u.pix for centre in centres]
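# ---------------------------------------------------------------------------
# Illustrative sketch (not from the package documentation): the Filament2D
# class defined below is built from the pixel coordinates of a single
# skeleton, e.g. the output of `np.where` on a 2D boolean skeleton mask
# (`skeleton_mask` is an assumed input for this example).
#
#   from fil_finder.filament import Filament2D
#   pixel_coords = np.where(skeleton_mask)
#   fil = Filament2D(pixel_coords)
#   mask = fil.skeleton(pad_size=1)       # boolean skeleton cut-out
#   centre = fil.position()               # median pixel position
# ---------------------------------------------------------------------------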
class Filament2D(FilamentNDBase):
"""
Analysis and properties of a 2D filament.
Parameters
----------
pixel_coords : tuple of `~np.ndarray`
Pixel coordinates as a set of arrays (i.e., the output from
`~numpy.where`).
converter : `~fil_finder.base_conversions.UnitConverter`, optional
Unit converter class.
wcs : `~astropy.wcs.WCS`, optional
WCS information for the pixel set.
distance : `~astropy.units.Quantity`, optional
        Distance to the region described by the pixel set. Required for
conversions to physical units.
"""
def __init__(self, pixel_coords, converter=None, wcs=None, distance=None):
super(Filament2D, self).__init__()
self._pixel_coords = pixel_coords
# Create a separate account of the initial skeleton pixels
self._orig_pixel_coords = pixel_coords
if converter is not None:
self._converter = converter
else:
self._converter = UnitConverter(wcs=wcs, distance=distance)
def image_slicer(self, image, out_shape, pad_size=0):
'''
Create a cut-out of a given image to some output shape with optional
padding on the edges. The given image must be on the same pixel grid
as the image used to create the skeleton.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
Image to slice out around the skeleton.
out_shape : tuple
2D output shape.
pad_size : int, optional
Number of pixels to pad.
Returns
-------
out_arr : `~numpy.ndarray` or `~astropy.units.Quantity`
Output array with given shape.
'''
arr_cent = [(out_shape[0] - pad_size * 2 - 1) / 2. +
self.pixel_extents[0][0],
(out_shape[1] - pad_size * 2 - 1) / 2. +
self.pixel_extents[0][1]]
out_arr = extract_array(image, out_shape, arr_cent)
# astropy v4.0 now retains the unit. So only add a unit
# when out_arr isn't a Quantity
if hasattr(image, "unit") and not hasattr(out_arr, 'unit'):
out_arr = out_arr * image.unit
return out_arr
def skeleton(self, pad_size=0, corner_pix=None, out_type='all'):
'''
Create a mask from the pixel coordinates.
Parameters
----------
pad_size : int, optional
Number of pixels to pad along each edge.
corner_pix : tuple of ints, optional
The position of the left-bottom corner of the pixels in the
skeleton. Used for offsetting the location of the pixels.
out_type : {"all", "longpath"}, optional
Return the entire skeleton or just the longest path. Default is to
return the whole skeleton.
Returns
-------
mask : `~numpy.ndarray`
Boolean mask containing the skeleton pixels.
'''
pad_size = int(pad_size)
if pad_size < 0:
raise ValueError("pad_size must be a positive integer.")
if corner_pix is None:
# Place the smallest pixel in the set at the pad size
corner_pix = [pad_size, pad_size]
out_types = ['all', 'longpath']
if out_type not in out_types:
raise ValueError("out_type must be 'all' or 'longpath'.")
y_shape = self.pixel_extents[1][0] - self.pixel_extents[0][0] + \
2 * pad_size + 1
x_shape = self.pixel_extents[1][1] - self.pixel_extents[0][1] + \
2 * pad_size + 1
mask = np.zeros((y_shape, x_shape), dtype=bool)
if out_type == 'all':
pixels = self.pixel_coords
else:
if not hasattr(self, '_longpath_pixel_coords'):
raise AttributeError("longest path is not defined. Run "
"`Filament2D.skeleton_analysis` first.")
pixels = self.longpath_pixel_coords
mask[pixels[0] - self.pixel_extents[0][0] + corner_pix[0],
pixels[1] - self.pixel_extents[0][1] + corner_pix[1]] = True
return mask
def skeleton_analysis(self, image, verbose=False, save_png=False,
save_name=None, prune_criteria='all',
relintens_thresh=0.2, max_prune_iter=10,
branch_thresh=0 * u.pix):
'''
Run the skeleton analysis.
Separates skeleton structures into branches and intersections. Branches
below the pruning criteria are removed. The structure is converted into
a graph object to find the longest path. The pruned skeleton is used in
the subsequent analysis steps.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
Data the filament was extracted from.
verbose : bool, optional
Show intermediate plots.
save_png : bool, optional
Save the plots in verbose mode.
save_name : str, optional
Prefix for the saved plots.
prune_criteria : {'all', 'intensity', 'length'}, optional
Choose the property to base pruning on. 'all' requires that the
branch fails to satisfy the length and relative intensity checks.
relintens_thresh : float, optional
Value between 0 and 1 that sets the relative importance of the
intensity-to-length criteria when pruning. Only used if
`prune_criteria='all'`.
max_prune_iter : int, optional
Maximum number of pruning iterations to apply.
branch_thresh : `~astropy.units.Quantity`, optional
Minimum length for a branch to be eligible to be pruned.
'''
# NOTE:
# All of these functions are essentially the same as those used for
# fil_finder_2D. For now, they all are expecting lists with each
# filament property as an element. Everything is wrapped to be a list
# because of this, but will be removed once fil_finder_2D is removed.
# A lot of this can be streamlined in that process.
if save_png and save_name is None:
raise ValueError("save_name must be given when save_png=True.")
# Must have a pad size of 1 for the morphological operations.
pad_size = 1
self._pad_size = pad_size
branch_thresh = self._converter.to_pixel(branch_thresh)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skel_mask = self.skeleton(pad_size=pad_size)
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skel_mask.shape:
input_image = self.image_slicer(input_image, skel_mask.shape,
pad_size=pad_size)
# The mask and sliced image better have the same shape!
if input_image.shape != skel_mask.shape:
raise AssertionError("Sliced image shape does not equal the mask "
"shape. This should never happen! If you see"
" this issue, please report it as a bug!")
iter = 0
while True:
skel_mask = self.skeleton(pad_size=pad_size)
interpts, hubs, ends, filbranches, labeled_mask = \
pix_identify([skel_mask], 1)
branch_properties = init_lengths(labeled_mask, filbranches,
[[(0, 0), (0, 0)]],
input_image)
edge_list, nodes, loop_edges = \
pre_graph(labeled_mask, branch_properties, interpts, ends)
max_path, extremum, G = \
longest_path(edge_list, nodes,
verbose=False,
skeleton_arrays=labeled_mask)
# Skip pruning if skeleton has only one branch
if len(G[0].nodes()) > 1:
updated_lists = \
prune_graph(G, nodes, edge_list, max_path, labeled_mask,
branch_properties, loop_edges,
prune_criteria=prune_criteria,
length_thresh=branch_thresh.value,
relintens_thresh=relintens_thresh,
max_iter=1)
labeled_mask, edge_list, nodes, branch_properties = \
updated_lists
final_fil_arrays =\
make_final_skeletons(labeled_mask, interpts,
verbose=False)
# Update the skeleton pixels
good_pix = np.where(final_fil_arrays[0])
self._pixel_coords = \
(good_pix[0] + self.pixel_extents[0][0] - pad_size,
good_pix[1] + self.pixel_extents[0][1] - pad_size)
if iter == 0:
prev_G = G[0]
iter += 1
if iter == max_prune_iter:
break
else:
continue
# Isomorphic comparison is failing for networkx 2.1
# I don't understand the error, so we'll instead require
# that the nodes be the same. This should be safe as
# pruning can only remove nodes.
# edge_match = iso.numerical_edge_match('weight', 1)
# if nx.is_isomorphic(prev_G, G[0],
# edge_match=edge_match):
# the node attribute was removed in 2.4.
            if hasattr(G[0], 'node'):
                if prev_G.node == G[0].node:
                    break
            if hasattr(G[0], 'nodes'):
                if prev_G.nodes == G[0].nodes:
                    break
prev_G = G[0]
iter += 1
if iter >= max_prune_iter:
warnings.warn("Graph pruning reached max iterations.")
break
self._graph = G[0]
# Run final analyses for plotting, etc.
max_path, extremum, G = \
longest_path(edge_list, nodes,
verbose=verbose,
save_png=save_png,
save_name="{0}_graphstruct.png".format(save_name),
skeleton_arrays=labeled_mask)
length_output = main_length(max_path, edge_list, labeled_mask,
interpts,
branch_properties["length"],
1.,
verbose=verbose, save_png=save_png,
save_name="{0}_longestpath.png".format(save_name))
lengths, long_path_array = length_output
good_long_pix = np.where(long_path_array[0])
self._longpath_pixel_coords = \
(good_long_pix[0] + self.pixel_extents[0][0] - pad_size,
good_long_pix[1] + self.pixel_extents[0][1] - pad_size)
self._length = lengths[0] * u.pix
final_fil_arrays =\
make_final_skeletons(labeled_mask, interpts,
verbose=verbose, save_png=save_png,
save_name="{0}_finalskeleton.png".format(save_name))
# Track the final intersection and end points
interpts, hubs, ends = \
pix_identify([final_fil_arrays[0].copy()], 1)[:3]
# Adjust intersection and end points to be in the original array
# positions
corr_inters = []
for inter in interpts[0]:
per_inter = []
for ints in inter:
per_inter.append((ints[0] + self.pixel_extents[0][0] - pad_size,
ints[1] + self.pixel_extents[0][1] - pad_size))
corr_inters.append(per_inter)
self._interpts = corr_inters
corr_ends = []
for end in ends[0]:
corr_ends.append((end[0] + self.pixel_extents[0][0] - pad_size,
end[1] + self.pixel_extents[0][1] - pad_size))
self._endpts = corr_ends
# Update the skeleton pixels
good_pix = np.where(final_fil_arrays[0])
self._pixel_coords = \
(good_pix[0] + self.pixel_extents[0][0] - pad_size,
good_pix[1] + self.pixel_extents[0][1] - pad_size)
self._branch_properties = \
{'length': branch_properties['length'][0] * u.pix,
'intensity': np.array(branch_properties['intensity'][0]),
'number': branch_properties['number'][0],
'pixels': branch_properties['pixels'][0]}
@property
def branch_properties(self):
'''
Dictionary with branch lengths, average intensity, and pixels.
'''
return self._branch_properties
def branch_pts(self, img_coords=False):
'''
Pixels within each skeleton branch.
Parameters
----------
img_coords : bool
Return the branch pts in coordinates of the original image.
'''
if not img_coords:
return self.branch_properties['pixels']
# Transform from per-filament to image coords
img_branch_pts = []
for bpts in self.branch_properties['pixels']:
bpts_copy = bpts.copy()
bpts_copy[:, 0] = bpts[:, 0] + self.pixel_extents[0][0] - self._pad_size
bpts_copy[:, 1] = bpts[:, 1] + self.pixel_extents[0][1] - self._pad_size
img_branch_pts.append(bpts_copy)
return img_branch_pts
@property
def intersec_pts(self):
'''
        Skeleton pixels associated with intersections.
'''
return self._interpts
@property
def end_pts(self):
'''
        Skeleton pixels associated with branch ends.
'''
return self._endpts
def length(self, unit=u.pixel):
'''
The longest path length of the skeleton
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
return self._converter.from_pixel(self._length, unit)
@property
def longpath_pixel_coords(self):
'''
Pixel coordinates of the longest path.
'''
return self._longpath_pixel_coords
@property
def graph(self):
'''
The networkx graph for the filament.
'''
return self._graph
def plot_graph(self, save_name=None, layout_func=nx.spring_layout):
'''
Plot the graph structure.
Parameters
----------
save_name : str, optional
Name of saved plot. A plot is only saved if a name is given.
layout_func : networkx layout function, optional
Layout function from networkx. Defaults to `spring_layout`.
'''
import matplotlib.pyplot as plt
G = self.graph
elist = [(u, v) for (u, v, d) in G.edges(data=True)]
posns = layout_func(G)
nx.draw_networkx_nodes(G, posns, node_size=200)
nx.draw_networkx_edges(G, posns, edgelist=elist, width=2)
nx.draw_networkx_labels(G, posns, font_size=10,
font_family='sans-serif')
plt.axis('off')
if save_name is not None:
# Save the plot
plt.savefig(save_name)
plt.close()
else:
plt.show()
# Add in the ipynb checker
def rht_analysis(self, radius=10 * u.pix, ntheta=180,
background_percentile=25):
'''
Use the RHT to find the filament orientation and dispersion of the
longest path.
Parameters
----------
radius : `~astropy.units.Quantity`, optional
Radius of the region to compute the orientation within. Converted
to pixel units and rounded to the nearest integer.
ntheta : int, optional
Number of angles to sample at. Default is 180.
background_percentile : float, optional
Float between 0 and 100 that sets a background level for the RHT
distribution before calculating orientation and curvature.
'''
if not hasattr(radius, 'unit'):
warnings.warn("Radius has no given units. Assuming pixel units.")
radius *= u.pix
radius = int(round(self._converter.to_pixel(radius).value))
longpath_arr = self.skeleton(out_type='longpath')
longpath_arr = np.fliplr(longpath_arr)
theta, R, quant = rht(longpath_arr, radius, ntheta,
background_percentile)
twofive, mean, sevenfive = quant
self._orientation = mean * u.rad
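        # The RHT angles span a half-circle, so if the 75th percentile falls
        # numerically below the 25th, the interquartile range has wrapped
        # around and pi is added to recover the true angular spread.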
if sevenfive > twofive:
self._curvature = np.abs(sevenfive - twofive) * u.rad
else:
self._curvature = (np.abs(sevenfive - twofive) + np.pi) * u.rad
self._orientation_hist = [theta, R]
self._orientation_quantiles = [twofive, sevenfive]
@property
def orientation_hist(self):
'''
Distribution of orientations from the RHT along the longest path.
Contains the angles of the distribution bins and the values in those
bins.
'''
return self._orientation_hist
@property
def orientation(self):
'''
Mean orientation of the filament along the longest path.
'''
return self._orientation
@property
def curvature(self):
'''
Interquartile range of the RHT orientation distribution along the
longest path.
'''
return self._curvature
def plot_rht_distrib(self, save_name=None):
'''
Plot the RHT distribution from `Filament2D.rht_analysis`.
Parameters
----------
save_name : str, optional
Name of saved plot. A plot is only saved if a name is given.
'''
theta = self.orientation_hist[0]
R = self.orientation_hist[1]
import matplotlib.pyplot as plt
median = self.orientation.value
twofive, sevenfive = self._orientation_quantiles
ax1 = plt.subplot(121, polar=True)
ax1.plot(2 * theta, R / R.max(), "kD")
ax1.fill_between(2 * theta, 0,
R[:, 0] / R.max(),
facecolor="blue",
interpolate=True, alpha=0.5)
ax1.set_rmax(1.0)
ax1.plot([2 * median] * 2, np.linspace(0.0, 1.0, 2), "g")
ax1.plot([2 * twofive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
ax1.plot([2 * sevenfive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
plt.subplot(122)
plt.imshow(self.skeleton(out_type='longpath'),
cmap="binary", origin="lower")
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
def rht_branch_analysis(self, radius=10 * u.pix, ntheta=180,
background_percentile=25,
min_branch_length=3 * u.pix):
'''
Use the RHT to find the filament orientation and dispersion of each
branch in the filament.
Parameters
----------
radius : `~astropy.units.Quantity`, optional
Radius of the region to compute the orientation within. Converted
to pixel units and rounded to the nearest integer.
ntheta : int, optional
Number of angles to sample at. Default is 180.
background_percentile : float, optional
Float between 0 and 100 that sets a background level for the RHT
distribution before calculating orientation and curvature.
min_branch_length : `~astropy.units.Quantity`, optional
Minimum length of a branch to run the RHT on. Branches that are
too short will cause spikes along the axis angles or 45 deg. off.
'''
# Convert length cut to pixel units
if not hasattr(radius, 'unit'):
warnings.warn("Radius has no given units. Assuming pixel units.")
radius *= u.pix
if not hasattr(min_branch_length, 'unit'):
warnings.warn("min_branch_length has no given units. Assuming "
"pixel units.")
min_branch_length *= u.pix
radius = int(round(self._converter.to_pixel(radius).value))
min_branch_length = self._converter.to_pixel(min_branch_length).value
means = []
iqrs = []
# Make padded arrays from individual branches
for i, (pix, length) in enumerate(zip(self.branch_pts(img_coords=False),
self.branch_properties['length'])):
if length.value < min_branch_length:
means.append(np.NaN)
iqrs.append(np.NaN)
continue
# Setup size of array
ymax = pix[:, 0].max()
ymin = pix[:, 0].min()
xmax = pix[:, 1].max()
xmin = pix[:, 1].min()
shape = (ymax - ymin + 1 + 2 * radius,
xmax - xmin + 1 + 2 * radius)
branch_array = np.zeros(shape, dtype=bool)
branch_array[pix[:, 0] - ymin + radius,
pix[:, 1] - xmin + radius] = True
branch_array = np.fliplr(branch_array)
theta, R, quant = rht(branch_array, radius, ntheta,
background_percentile)
twofive, mean, sevenfive = quant
means.append(mean)
if sevenfive > twofive:
iqrs.append(np.abs(sevenfive - twofive))
else:
iqrs.append(np.abs(sevenfive - twofive) + np.pi)
self._orientation_branches = np.array(means) * u.rad
self._curvature_branches = np.array(iqrs) * u.rad
@property
def orientation_branches(self):
'''
Orientations along each branch in the filament.
'''
return self._orientation_branches
@property
def curvature_branches(self):
'''
Curvature along each branch in the filament.
'''
return self._curvature_branches
def width_analysis(self, image, all_skeleton_array=None,
max_dist=10 * u.pix,
pad_to_distance=0 * u.pix,
fit_model='gaussian_bkg',
fitter=None,
try_nonparam=True,
use_longest_path=False,
add_width_to_length=False,
deconvolve_width=True,
beamwidth=None,
fwhm_function=None,
chisq_max=10.,
**kwargs):
'''
Create an average radial profile for the filament and fit a given
model.
Parameters
----------
image : `~astropy.unit.Quantity` or `~numpy.ndarray`
The image from which the filament was extracted.
all_skeleton_array : np.ndarray
An array with the skeletons of other filaments. This is used to
avoid double-counting pixels in the radial profiles in nearby
filaments.
max_dist : `~astropy.units.Quantity`, optional
Largest radius around the skeleton to create the profile from. This
            can be given in pixel, angular, or physical units.
pad_to_distance : `~astropy.units.Quantity`, optional
Force all pixels within this distance to be kept, even if a pixel
is closer to another skeleton, as given in `all_skeleton_array`.
fit_model : str or `~astropy.modeling.Fittable1DModel`, optional
The model to fit to the profile. Built-in models include
'gaussian_bkg' for a Gaussian with a constant background,
'gaussian_nobkg' for just a Gaussian, 'nonparam' for the
non-parametric estimator. Defaults to 'gaussian_bkg'.
fitter : `~astropy.modeling.fitting.Fitter`, optional
One of the astropy fitting classes. Defaults to a
Levenberg-Marquardt fitter.
try_nonparam : bool, optional
If the chosen model fit fails, fall back to a non-parametric
estimate.
use_longest_path : bool, optional
Only fit profile to the longest path skeleton. Disabled by
default.
add_width_to_length : bool, optional
Add the FWHM to the filament length. This accounts for the
            expected shortening in the medial axis transform. Disabled by
            default.
deconvolve_width : bool, optional
Deconvolve the beam width from the FWHM. Enabled by default.
beamwidth : `~astropy.units.Quantity`, optional
The beam width to deconvolve the FWHM from. Required if
`deconvolve_width = True`.
fwhm_function : function, optional
Convert the width parameter to the FWHM. Must take the fit model
as an argument and return the FWHM and its uncertainty. If no
function is given, the Gaussian FWHM is used.
chisq_max : float, optional
Enable the fail flag if the reduced chi-squared value is above
this limit.
kwargs : Passed to `~fil_finder.width.radial_profile`.
'''
# Convert quantities to pixel units.
max_dist = self._converter.to_pixel(max_dist).value
pad_to_distance = self._converter.to_pixel(pad_to_distance).value
if deconvolve_width and beamwidth is None:
raise ValueError("beamwidth must be given when deconvolve_width is"
" enabled.")
if beamwidth is not None:
beamwidth = self._converter.to_pixel(beamwidth)
# Use the max dist as the pad size
pad_size = int(np.ceil(max_dist))
# if given a master skeleton array, require it to be the same shape as
# the image
if all_skeleton_array is not None:
if all_skeleton_array.shape != image.shape:
raise ValueError("The shape of all_skeleton_array must match"
" the given image.")
if use_longest_path:
skel_array = self.skeleton(pad_size=pad_size, out_type='longpath')
else:
skel_array = self.skeleton(pad_size=pad_size, out_type='all')
out_shape = skel_array.shape
input_image = self.image_slicer(image, out_shape, pad_size=pad_size)
if all_skeleton_array is not None:
input_all_skeleton_array = \
self.image_slicer(all_skeleton_array, out_shape,
pad_size=pad_size)
else:
input_all_skeleton_array = None
# Create distance arrays to build profile from
dist_skel_arr = nd.distance_transform_edt(np.logical_not(skel_array))
# And create a distance array from the full skeleton array if given
if input_all_skeleton_array is not None:
dist_skel_all = nd.distance_transform_edt(np.logical_not(input_all_skeleton_array))
else:
dist_skel_all = None
# Need the unbinned data for the non-parametric fit.
out = radial_profile(input_image, dist_skel_all,
dist_skel_arr,
[(0, 0), (0, 0)],
max_distance=max_dist,
pad_to_distance=pad_to_distance,
**kwargs)
if out is None:
raise ValueError("Building radial profile failed. Check the input"
" image for NaNs.")
else:
dist, radprof, weights, unbin_dist, unbin_radprof = out
# Attach units
xunit = u.pix
if hasattr(image, 'unit'):
yunit = image.unit
else:
yunit = u.dimensionless_unscaled
self._yunit = yunit
radprof = radprof * yunit
dist = dist * xunit
self._radprofile = [dist, radprof]
self._unbin_radprofile = [unbin_dist * xunit,
unbin_radprof * yunit]
# Make sure the given model is valid
if not isinstance(fit_model, mod.Model):
skip_fitting = False
self._radprof_type = fit_model
# Check the default types
if fit_model == "gaussian_bkg":
fit_model = gaussian_model(dist, radprof, with_bkg=True)
elif fit_model == "gaussian_nobkg":
fit_model = gaussian_model(dist, radprof, with_bkg=False)
elif fit_model == "nonparam":
skip_fitting = True
else:
raise ValueError("fit_model must be an "
"astropy.modeling.Fittable1DModel or "
"one of the default models: 'gaussian_bkg',"
" 'gaussian_nobkg', or 'nonparam'.")
        else:
            skip_fitting = False
            # Record the fit type
            self._radprof_type = fit_model.name
if not skip_fitting:
fitted_model, fitter = fit_radial_model(dist, radprof, fit_model,
weights=weights)
# Only keep the non-fixed parameters. The fixed parameters won't
# appear in the covariance matrix.
params = []
names = []
for name in fitted_model.param_names:
# Check if it is fixed:
if fitted_model.fixed[name]:
continue
param = getattr(fitted_model, name)
if param.quantity is not None:
params.append(param.quantity)
else:
# Assign a dimensionless unit
params.append(param.value * u.dimensionless_unscaled)
names.append(name)
self._radprof_params = params
npar = len(self.radprof_params)
self._radprof_parnames = names
self._radprof_model = fitted_model
self._radprof_fitter = fitter
# Fail checks
fail_flag = False
param_cov = fitter.fit_info.get('param_cov')
if param_cov is not None:
fit_uncert = list(np.sqrt(np.diag(param_cov)))
else:
fit_uncert = [np.NaN] * npar
fail_flag = True
if len(fit_uncert) != len(params):
raise ValueError("The number of parameters does not match the "
"number from the covariance matrix. Check for"
" fixed parameters.")
# Add units to errors
for i, par in enumerate(params):
fit_uncert[i] = fit_uncert[i] * par.unit
self._radprof_errors = fit_uncert
# Check if units should be kept
if fitted_model._supports_unit_fitting:
modvals = fitted_model(dist)
radprof_vals = radprof
else:
modvals = fitted_model(dist.value)
radprof_vals = radprof.value
chisq = red_chisq(radprof_vals, modvals, npar, 1)
if chisq > chisq_max:
fail_flag = True
if (skip_fitting or fail_flag) and try_nonparam:
fit, fit_error, fail_flag = \
nonparam_width(dist.value, radprof.value,
unbin_dist, unbin_radprof,
None, 5, 99)
self._radprof_type = 'nonparam'
# Make the equivalent Gaussian model w/ a background
self._radprof_model = Gaussian1D() + Const1D()
if self._radprof_model._supports_unit_fitting:
self._radprof_model.amplitude_0 = fit[0] * yunit
self._radprof_model.mean_0 = 0.0 * xunit
self._radprof_model.sigma_0 = fit[1] * xunit
self._radprof_model.amplitude_1 = fit[2] * yunit
else:
self._radprof_model.amplitude_0 = fit[0]
self._radprof_model.mean_0 = 0.0
self._radprof_model.sigma_0 = fit[1]
self._radprof_model.amplitude_1 = fit[2]
# Slice out the FWHM and add units
params = [fit[0] * yunit, fit[1] * xunit, fit[2] * yunit]
errs = [fit_error[0] * yunit, fit_error[1] * xunit,
fit_error[2] * yunit]
self._radprof_params = params
self._radprof_errors = errs
self._radprof_parnames = ['amplitude_0', 'stddev_0', 'amplitude_1']
if fwhm_function is not None:
            fwhm, fwhm_err = fwhm_function(fitted_model)
else:
# Default to Gaussian FWHM
            found_width = False
            for idx, name in enumerate(self.radprof_parnames):
if "stddev" in name:
found_width = True
break
if found_width:
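                # Gaussian FWHM = stddev * sqrt(8 ln 2) ~= 2.3548 * stddev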
fwhm = self.radprof_params[idx].value * np.sqrt(8 * np.log(2)) * xunit
fwhm_err = self.radprof_errors[idx].value * np.sqrt(8 * np.log(2)) * xunit
else:
raise ValueError("Could not automatically identify which "
"parameter in the model corresponds to the "
"width. Please pass a function to "
"'fwhm_function' to identify the width "
"parameter.")
if deconvolve_width:
fwhm_deconv_sq = fwhm**2 - beamwidth**2
if fwhm_deconv_sq > 0:
fwhm_deconv = np.sqrt(fwhm_deconv_sq)
fwhm_deconv_err = fwhm * fwhm_err / fwhm_deconv
else:
fwhm_deconv = np.NaN
fwhm_deconv_err = np.NaN
warnings.warn("Width could not be deconvolved from the beam "
"width.")
else:
fwhm_deconv = fwhm
fwhm_deconv_err = fwhm_err
self._fwhm = fwhm_deconv
self._fwhm_err = fwhm_deconv_err
# Final width check -- make sure length is longer than the width.
# If it is, add the width onto the length since the adaptive
        # thresholding shortens each edge by about the same amount.
if self.length() < self._fwhm:
fail_flag = True
# Add the width onto the length if enabled
if add_width_to_length:
if fail_flag:
warnings.warn("Ignoring adding the width to the length because"
" the fail flag was raised for the fit.")
else:
self._length += self._fwhm
self._radprof_failflag = fail_flag
@property
def radprof_fit_fail_flag(self):
'''
Flag to catch poor fits.
'''
return self._radprof_failflag
@property
def radprof_type(self):
'''
The model type used to fit the radial profile.
'''
return self._radprof_type
@property
def radprofile(self):
'''
The binned radial profile created in `~FilFinder2D.width_analysis`.
This contains the distances and the profile value in the distance bin.
'''
return self._radprofile
@property
def radprof_params(self):
'''
Fit parameters from `~FilFinder2D.width_analysis`.
'''
return self._radprof_params
@property
def radprof_errors(self):
'''
Fit uncertainties from `~FilFinder2D.width_analysis`.
'''
return self._radprof_errors
def radprof_fwhm(self, unit=u.pixel):
'''
The FWHM of the fitted radial profile and its uncertainty.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
return self._converter.from_pixel(self._fwhm, unit), \
self._converter.from_pixel(self._fwhm_err, unit)
@property
def radprof_parnames(self):
'''
Parameter names from `~FilFinder2D.radprof_model`.
'''
return self._radprof_parnames
def radprof_fit_table(self, unit=u.pix):
'''
Return an `~astropy.table.Table` with the fit parameters and
uncertainties.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
from astropy.table import Table, Column
tab = Table()
for name, val, err in zip(self.radprof_parnames, self.radprof_params,
self.radprof_errors):
# Try converting to the given unit. Assume failures are not length
# units.
try:
conv_val = self._converter.from_pixel(val, unit)
conv_err = self._converter.from_pixel(err, unit)
except u.UnitsError:
conv_val = val
conv_err = err
tab[name] = Column(conv_val.reshape((1,)))
tab[name + "_err"] = Column(conv_err.reshape((1,)))
# Add on the FWHM
tab['fwhm'] = Column(self.radprof_fwhm(unit)[0].reshape((1,)))
tab['fwhm_err'] = Column(self.radprof_fwhm(unit)[1].reshape((1,)))
# Add on whether the fit was "successful"
tab['fail_flag'] = Column([self.radprof_fit_fail_flag])
# Add the type of fit based on the model type
tab['model_type'] = Column([self.radprof_type])
return tab
@property
def radprof_model(self):
'''
The fitted radial profile model.
'''
return self._radprof_model
def plot_radial_profile(self, save_name=None, xunit=u.pix,
ax=None):
'''
Plot the radial profile of the filament and the fitted model.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
ax : `~matplotlib.axes`, optional
Use an existing set of axes to plot the profile.
'''
dist, radprof = self.radprofile
model = self.radprof_model
conv_dist = self._converter.from_pixel(dist, xunit)
import matplotlib.pyplot as plt
if ax is None:
ax = plt.subplot(111)
ax.plot(conv_dist, radprof, "kD")
points = np.linspace(np.min(dist),
np.max(dist), 5 * len(dist))
# Check if units should be kept when evaluating the model
if not model._supports_unit_fitting:
points = points.value
conv_points = np.linspace(np.min(conv_dist),
np.max(conv_dist), 5 * len(conv_dist))
ax.plot(conv_points, model(points), "r")
ax.set_xlabel(r'Radial Distance ({})'.format(xunit))
ax.set_ylabel(r'Intensity ({})'.format(self._yunit))
ax.grid(True)
plt.tight_layout()
if save_name is not None:
plt.savefig(save_name)
plt.show()
if in_ipynb():
plt.clf()
def total_intensity(self, bkg_subtract=False, bkg_mod_index=2):
'''
Return the sum of all pixels within the FWHM of the filament.
.. warning::
`fil_finder_2D` multiplied the total intensity by the angular size
of a pixel. This function is just the sum of pixel values. Unit
conversions can be applied on the output if needed.
Parameters
----------
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
total_intensity : `~astropy.units.Quantity`
The total intensity for the filament.
'''
within_fwhm = self._unbin_radprofile[0] <= \
0.5 * self.radprof_fwhm()[0]
total_intensity = np.sum(self._unbin_radprofile[1][within_fwhm])
if bkg_subtract:
bkg = self.radprof_params[bkg_mod_index]
if not self.radprof_model._supports_unit_fitting:
bkg = bkg.value * total_intensity.unit
total_intensity -= bkg * within_fwhm.sum()
return total_intensity
def model_image(self, max_radius=20 * u.pix, bkg_subtract=True,
bkg_mod_index=2):
'''
Return a model image from the radial profile fit.
Parameters
----------
max_radius : `~astropy.units.Quantity`, optional
Set the radius to compute the model to. The outputted array
will be padded by the number of pixels the max_radius corresponds
to.
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
model_array : `~astropy.units.Quantity`
A 2D array computed using the radial profile model.
'''
max_radius = self._converter.to_pixel(max_radius).value
pad_size = int(max_radius)
skel_arr = self.skeleton(pad_size)
dists = nd.distance_transform_edt(~skel_arr)
if self.radprof_model._supports_unit_fitting:
dists = dists * u.pix
if not bkg_subtract:
return self.radprof_model(dists)
else:
bkg = self.radprof_params[bkg_mod_index]
if not self.radprof_model._supports_unit_fitting:
bkg = bkg.value
return self.radprof_model(dists) - bkg
def median_brightness(self, image):
'''
Return the median brightness along the skeleton of the filament.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
Returns
-------
median_brightness : float or `~astropy.units.Quantity`
Median brightness along the skeleton.
'''
pad_size = 1
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skels = self.skeleton(pad_size=pad_size)
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
assert input_image.shape == skels.shape
return np.nanmedian(input_image[skels])
def ridge_profile(self, image):
'''
        Return the image values along the longest path of the filament.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
'''
pad_size = 1
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size) * \
u.dimensionless_unscaled
skels = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
# These should have the same shape now.
assert input_image.shape == skels.shape
from .width_profiles.profile_line_width import walk_through_skeleton
order_pts = walk_through_skeleton(skels)
if hasattr(image, 'unit'):
unit = image.unit
else:
unit = u.dimensionless_unscaled
input_image = input_image * unit
values = []
for pt in order_pts:
values.append(input_image[pt[0], pt[1]].value)
return values * unit
def profile_analysis(self, image, max_dist=20 * u.pix,
num_avg=3, xunit=u.pix):
'''
Create profiles of radial slices along the longest path skeleton.
Profiles created from `~fil_finder.width_profiles.filament_profile`.
.. note::
Does not include fitting to the radial profiles. Limited fitting
of Gaussian profiles is provided in
`~fil_finder.width_profiles.filament_profile`. See a dedicated
package like `radfil <https://github.com/catherinezucker/radfil>`_
for modeling profiles.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
max_dist : astropy Quantity, optional
The angular or physical (when distance is given) extent to create
the profile away from the centre skeleton pixel. The entire
profile will be twice this value (for each side of the profile).
num_avg : int, optional
Number of points before and after a pixel that is used when
computing the normal vector. Using at least three points is
recommended due to small pixel instabilities in the skeletons.
Returns
-------
dists : `~astropy.units.Quantity`
Distances in the radial profiles from the skeleton. Units set by
`xunit`.
profiles : `~astropy.units.Quantity`
Radial image profiles.
'''
from .width_profiles import filament_profile
max_dist = self._converter.to_pixel(max_dist)
pad_size = int(max_dist.value)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
if hasattr(image, 'unit'):
input_image = input_image * image.unit
else:
input_image = input_image * u.dimensionless_unscaled
skels = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
# Check if angular conversions are defined. If not, stay in pixel units
if hasattr(self._converter, '_ang_size'):
pixscale = self._converter.to_angular(1 * u.pix)
ang_conv = True
else:
pixscale = 1.0 * u.deg
ang_conv = False
dists, profiles = filament_profile(skels, input_image, pixscale,
max_dist=max_dist,
distance=None,
fit_profiles=False,
bright_unit=input_image.unit)
# First put the distances into pixel units
if ang_conv:
dists = [self._converter.to_pixel(dist) for dist in dists]
else:
# Already in pixel units.
dists = [dist.value * u.pix for dist in dists]
# Convert the distance units
dists = [self._converter.from_pixel(dist, xunit) for dist in dists]
return dists, profiles
def radprof_table(self, xunit=u.pix):
'''
Return the radial profile as a table.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Spatial unit to convert radial profile distances.
Returns
-------
tab : `~astropy.table.Table`
Table with the radial profile distance and values.
'''
from astropy.table import Column, Table
dists = Column(self._converter.from_pixel(self._radprofile[0], xunit))
vals = Column(self._radprofile[1])
tab = Table()
tab['distance'] = dists
tab['values'] = vals
return tab
def branch_table(self, include_rht=False):
'''
Save the branch properties of the filament.
Parameters
----------
include_rht : bool, optional
            If `Filament2D.rht_branch_analysis` has been run, the branch
            orientation and curvature will be added to the table.
Returns
-------
tab : `~astropy.table.Table`
Table with the branch properties.
'''
from astropy.table import Table, Column
branch_data = self.branch_properties.copy()
del branch_data['pixels']
del branch_data['number']
if include_rht:
branch_data['orientation'] = self.orientation_branches
branch_data['curvature'] = self.curvature_branches
tab = Table([Column(branch_data[key]) for key in branch_data],
names=branch_data.keys())
return tab
def save_fits(self, savename, image, pad_size=20 * u.pix, header=None,
**model_kwargs):
'''
Save a stamp of the image centered on the filament, the skeleton,
the longest path skeleton, and the model.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
pad_size : `~astropy.units.Quantity`, optional
Size to pad the saved arrays by.
header : `~astropy.io.fits.Header`, optional
Provide a FITS header to save to. If `~Filament2D` was
given WCS information, this will be used if no header is given.
model_kwargs : Passed to `~Filament2D.model_image`.
'''
pad_size = int(self._converter.to_pixel(pad_size).value)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skels = self.skeleton(pad_size=pad_size, out_type='all')
skels_lp = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
model = self.model_image(max_radius=pad_size * u.pix,
**model_kwargs)
if hasattr(model, 'unit'):
model = model.value
from astropy.io import fits
import time
if header is None:
if hasattr(self._converter, "_wcs"):
header = self._converter._wcs.to_header()
else:
header = fits.Header()
# Strip off units if the image is a Quantity
if hasattr(input_image, 'unit'):
input_image = input_image.value.copy()
hdu = fits.PrimaryHDU(input_image, header)
skel_hdr = header.copy()
skel_hdr['BUNIT'] = ("", "bool")
skel_hdr['COMMENT'] = "Skeleton created by fil_finder on " + \
time.strftime("%c")
skel_hdu = fits.ImageHDU(skels.astype(int), skel_hdr)
skel_lp_hdu = fits.ImageHDU(skels_lp.astype(int), skel_hdr)
model_hdu = fits.ImageHDU(model, header)
hdulist = fits.HDUList([hdu, skel_hdu, skel_lp_hdu, model_hdu])
hdulist.writeto(savename)
def to_pickle(self, savename):
'''
Save a Filament2D class as a pickle file.
Parameters
----------
savename : str
Name of the pickle file.
'''
with open(savename, 'wb') as output:
pickle.dump(self, output, -1)
@staticmethod
def from_pickle(filename):
'''
Load a Filament2D from a pickle file.
Parameters
----------
filename : str
Name of the pickle file.
'''
with open(filename, 'rb') as input:
self = pickle.load(input)
return self
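def _example_filament2d_usage():
    '''
    Illustrative usage sketch only -- this helper is not part of the original
    fil_finder API. It builds a Filament2D from a simple straight-line
    skeleton in a synthetic image (both the image and the pixel values are
    arbitrary assumptions made for the example) and runs the basic analysis
    steps documented in the methods above.
    '''
    image = np.ones((32, 32))
    image[16, 8:24] += 5.0
    # Skeleton pixel coordinates in the same format as np.where output
    pixel_coords = (np.array([16] * 16), np.arange(8, 24))
    fil = Filament2D(pixel_coords)
    fil.skeleton_analysis(image)
    return fil.length(u.pix)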
class Filament3D(FilamentNDBase):
"""docstring for Filament3D"""
def __init__(self, arg):
super(Filament3D, self).__init__()
self.arg = arg
| [
"[email protected]"
] | |
656b9a478e48b1c9114cb46915cfa1113d2c3a9e | 651a296c8f45b5799781fd78a6b5329effe702a0 | /polpak/bell_values.py | 22a15e0802b2391a06bf53d6f330732079415995 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | #!/usr/bin/env python
#
def bell_values ( n_data ):
#*****************************************************************************80
#
## BELL_VALUES returns some values of the Bell numbers.
#
# Discussion:
#
# The Bell number B(N) is the number of restricted growth functions on N.
#
# Note that the Stirling numbers of the second kind, S^m_n, count the
# number of partitions of N objects into M classes, and so it is
# true that
#
# B(N) = S^1_N + S^2_N + ... + S^N_N.
#
# The Bell numbers were named for Eric Temple Bell.
#
# In Mathematica, the function can be evaluated by
#
# Sum[StirlingS2[n,m],{m,1,n}]
#
# The Bell number B(N) is defined as the number of partitions (of
# any size) of a set of N distinguishable objects.
#
# A partition of a set is a division of the objects of the set into
# subsets.
#
# Example:
#
# There are 15 partitions of a set of 4 objects:
#
# (1234),
# (123) (4),
# (124) (3),
# (12) (34),
# (12) (3) (4),
# (134) (2),
# (13) (24),
# (13) (2) (4),
# (14) (23),
# (1) (234),
# (1) (23) (4),
# (14) (2) (3),
# (1) (24) (3),
# (1) (2) (34),
# (1) (2) (3) (4).
#
# and so B(4) = 15.
#
# First values:
#
# N B(N)
# 0 1
# 1 1
# 2 2
# 3 5
# 4 15
# 5 52
# 6 203
# 7 877
# 8 4140
# 9 21147
# 10 115975
#
# Recursion:
#
# B(I) = sum ( 1 <= J <=I ) Binomial ( I-1, J-1 ) * B(I-J)
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 November 2014
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Milton Abramowitz and Irene Stegun,
# Handbook of Mathematical Functions,
# US Department of Commerce, 1964.
#
# Stephen Wolfram,
# The Mathematica Book,
# Fourth Edition,
# Wolfram Media / Cambridge University Press, 1999.
#
# Parameters:
#
# Input/output, integer N_DATA. The user sets N_DATA to 0 before the
# first call. On each call, the routine increments N_DATA by 1, and
# returns the corresponding data; when there is no more data, the
# output value of N_DATA will be 0 again.
#
# Output, integer N, the order of the Bell number.
#
# Output, integer C, the value of the Bell number.
#
import numpy as np
n_max = 11
c_vec = np.array ( ( 1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975 ) )
n_vec = np.array ( ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ) )
if ( n_data < 0 ):
n_data = 0
if ( n_max <= n_data ):
n_data = 0
n = 0
c = 0
else:
n = n_vec[n_data]
c = c_vec[n_data]
n_data = n_data + 1
return n_data, n, c
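#
#  The helper below is an illustrative sketch and is not part of the original
#  file: it evaluates the recursion quoted in the discussion above,
#  B(I) = sum ( 1 <= J <= I ) Binomial(I-1,J-1) * B(I-J),
#  so its output can be checked against the tabulated values in BELL_VALUES.
#
def bell_recursive ( n ):
  from math import factorial
  def binomial ( m, k ):
    return factorial ( m ) // ( factorial ( k ) * factorial ( m - k ) )
  b = [ 1 ]
  for i in range ( 1, n + 1 ):
    b.append ( sum ( binomial ( i - 1, j - 1 ) * b[i-j] for j in range ( 1, i + 1 ) ) )
  return b[n]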
def bell_values_test ( ):
#*****************************************************************************80
#
## BELL_VALUES_TEST demonstrates the use of BELL_VALUES.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 November 2014
#
# Author:
#
# John Burkardt
#
print ''
print 'BELL_VALUES_TEST:'
print ' BELL_VALUES returns values of'
print ' the Bell numbers.'
print ''
print ' N BELL(N)'
print ''
n_data = 0
while ( True ):
n_data, n, c = bell_values ( n_data )
if ( n_data == 0 ):
break
print '%6d %10d' % ( n, c )
print ''
print 'BELL_VALUES_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
bell_values_test ( )
timestamp ( )
| [
"[email protected]"
] | |
e636e89dc9a0a67ae30601cbdb6cdcf9947fef12 | e4f2aba6cb66ac33c5fc439374e8ef39d0bb0e4a | /Week-2-format-string/Exercise-4.py | 7d00cc9ba3f0f3a488faa705797ac2907d073325 | [] | no_license | AChen24562/Python-QCC | 573f5b545239aa24b8047c74539ca6b3e997faa0 | 1da01b76e209eb9b0d08f0f205d635bc2a149dfd | refs/heads/master | 2023-02-06T23:18:41.850377 | 2020-12-28T12:59:29 | 2020-12-28T12:59:29 | 289,614,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | width = 17
height = 12.0
delimiter = "."
print(width//2, type(width//2))
print(width/2.0, type(width/2.0))
print(height/3, type(height/3))
# delimiter * 5 = '.....', str
print(delimiter * 5, type(delimiter * 5))
| [
"[email protected]"
] | |
307a62915d6949a0d0da070e0c930329d1b02074 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/compute/v20180601/get_log_analytic_export_request_rate_by_interval.py | 3eff60bb02dd42eaf3cbb6765a0555d41fb0c38f | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 3,956 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetLogAnalyticExportRequestRateByIntervalResult',
'AwaitableGetLogAnalyticExportRequestRateByIntervalResult',
'get_log_analytic_export_request_rate_by_interval',
]
@pulumi.output_type
class GetLogAnalyticExportRequestRateByIntervalResult:
"""
LogAnalytics operation status response
"""
def __init__(__self__, properties=None):
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> 'outputs.LogAnalyticsOutputResponse':
"""
LogAnalyticsOutput
"""
return pulumi.get(self, "properties")
class AwaitableGetLogAnalyticExportRequestRateByIntervalResult(GetLogAnalyticExportRequestRateByIntervalResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogAnalyticExportRequestRateByIntervalResult(
properties=self.properties)
def get_log_analytic_export_request_rate_by_interval(blob_container_sas_uri: Optional[str] = None,
from_time: Optional[str] = None,
group_by_operation_name: Optional[bool] = None,
group_by_resource_name: Optional[bool] = None,
group_by_throttle_policy: Optional[bool] = None,
interval_length: Optional['IntervalInMins'] = None,
location: Optional[str] = None,
to_time: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogAnalyticExportRequestRateByIntervalResult:
"""
LogAnalytics operation status response
:param str blob_container_sas_uri: SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
:param str from_time: From time of the query
:param bool group_by_operation_name: Group query result by Operation Name.
:param bool group_by_resource_name: Group query result by Resource Name.
:param bool group_by_throttle_policy: Group query result by Throttle Policy applied.
:param 'IntervalInMins' interval_length: Interval value in minutes used to create LogAnalytics call rate logs.
:param str location: The location upon which virtual-machine-sizes is queried.
:param str to_time: To time of the query
"""
__args__ = dict()
__args__['blobContainerSasUri'] = blob_container_sas_uri
__args__['fromTime'] = from_time
__args__['groupByOperationName'] = group_by_operation_name
__args__['groupByResourceName'] = group_by_resource_name
__args__['groupByThrottlePolicy'] = group_by_throttle_policy
__args__['intervalLength'] = interval_length
__args__['location'] = location
__args__['toTime'] = to_time
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute/v20180601:getLogAnalyticExportRequestRateByInterval', __args__, opts=opts, typ=GetLogAnalyticExportRequestRateByIntervalResult).value
return AwaitableGetLogAnalyticExportRequestRateByIntervalResult(
properties=__ret__.properties)
| [
"[email protected]"
] | |
ae3b13b10359ae08b10f0782054445f49475fc90 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/maxProduct_20200731212441.py | c7bb8018626c41179870e4caa9d8418f760ec486 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import sys
# Brute force: time complexity is O(n^3) and space is O(1)
def maxThree(arr):
if len(arr) < 3:
return -1
maxProduct = -(sys.maxsize -1)
print(maxProduct)
n = len(arr)
for i in range(0,n-2):
for j in range(i+1,n-1):
for k in range(j+1,n):
print('i',arr[i],'j',arr[j],'k',arr[k])
product = arr[i] * arr[j] * arr[k]
if product > maxProduct:
maxProduct = product
return maxProduct
# Sort-based solution: O(n log n) time
def maxOp(arr):
n = len(arr)
arr.sort()
first = arr[n-1] * arr[n-2] * arr[n-3]
second = arr[0] * arr[1] * arr[n-1]
return max(first,second)
print(maxOp([-5,-5,4,5]))
# O(n) time complexity
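# A sketch of the single-pass approach hinted at above (maxLinear is a new
# name, not in the original file): track the three largest and two smallest
# values in one scan, then compare the two candidate products. It should
# agree with maxOp for any list of three or more numbers.
def maxLinear(arr):
    if len(arr) < 3:
        return -1
    max1 = max2 = max3 = -(sys.maxsize - 1)
    min1 = min2 = sys.maxsize
    for x in arr:
        # update the three largest values seen so far
        if x > max1:
            max1, max2, max3 = x, max1, max2
        elif x > max2:
            max2, max3 = x, max2
        elif x > max3:
            max3 = x
        # update the two smallest values seen so far
        if x < min1:
            min1, min2 = x, min1
        elif x < min2:
            min2 = x
    return max(max1 * max2 * max3, min1 * min2 * max1)
print(maxLinear([-5,-5,4,5]))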
| [
"[email protected]"
] | |
1699d7d134745de10adce3f9435f31332bfe41fd | 635cb7fb75048f9de7b95b48d1f59de68f9b3368 | /R09/używanie_metaklas_do_kontrolowania_tworzenia_obiektów/example1.py | e76bc265bf850d4e8c8757ec5aaa9bafea6fbc7d | [] | no_license | anpadoma/python_receptury3 | 9e889ac503e48eb62160050eecfdc4a64072c184 | c761f2c36707785a8a70bdaccebd7533c76dee21 | refs/heads/master | 2021-01-22T14:38:34.718999 | 2014-01-31T22:09:44 | 2014-01-31T22:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # example1.py
#
# Direct creation of objects is not allowed
class NoInstances(type):
def __call__(self, *args, **kwargs):
raise TypeError("Nie można bezpośrednio tworzyć obiektu")
class Spam(metaclass=NoInstances):
@staticmethod
def grok(x):
print('Spam.grok')
if __name__ == '__main__':
try:
s = Spam()
except TypeError as e:
print(e)
Spam.grok(42)
| [
"[email protected]"
] |