id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---
6432229
|
import json
import logging
import os
from unittest.mock import patch
import wikimedia_commons as wmc
RESOURCES = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'tests/resources/wikimedia'
)
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
level=logging.DEBUG,
)
def test_derive_timestamp_pair():
# Note that the timestamps are derived as if input was in UTC.
actual_start_ts, actual_end_ts = wmc._derive_timestamp_pair('2018-01-15')
assert actual_start_ts == '1515974400'
assert actual_end_ts == '1516060800'
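# Illustration (not part of the module under test): '2018-01-15' is treated as
# midnight UTC and the pair spans exactly one day, so
#   2018-01-15 00:00:00 UTC -> epoch 1515974400
#   2018-01-16 00:00:00 UTC -> epoch 1516060800 (= 1515974400 + 86400)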
def test_get_image_pages_returns_correctly_with_continue():
with open(
os.path.join(RESOURCES, 'response_small_with_continue.json')
) as f:
resp_dict = json.load(f)
expect_result = {
'84798633': {
'pageid': 84798633,
'title': 'File:Ambassade1.jpg'
}
}
actual_result = wmc._get_image_pages(resp_dict)
assert actual_result == expect_result
def test_get_image_pages_returns_correctly_with_none_json():
expect_result = None
actual_result = wmc._get_image_pages(None)
assert actual_result == expect_result
def test_get_image_pages_returns_correctly_with_no_pages():
expect_result = None
actual_result = wmc._get_image_pages({'batch_complete': ''})
assert actual_result == expect_result
def test_build_query_params_adds_start_and_end():
actual_qp = wmc._build_query_params(
'abc', 'def', default_query_params={}
)
assert actual_qp['gaistart'] == 'abc'
assert actual_qp['gaiend'] == 'def'
def test_build_query_params_leaves_other_keys():
actual_qp = wmc._build_query_params(
'abc', 'def', default_query_params={'test': 'value'}
)
assert actual_qp['test'] == 'value'
def test_build_query_params_adds_continue():
actual_qp = wmc._build_query_params(
'abc',
'def',
{'continuetoken': 'next.jpg'},
default_query_params={'test': 'value'}
)
assert actual_qp['continuetoken'] == 'next.jpg'
def test_get_image_batch(monkeypatch):
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty1.json')
) as f:
first_response = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty2.json')
) as f:
second_response = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty3.json')
) as f:
third_response = json.load(f)
def mock_get_response_json(endpoint, retries, query_params, **kwargs):
continue_one = 'Edvard_Munch_-_Night_in_Nice_(1891).jpg|nowiki|1281339'
continue_two = 'Niedercunnersdorf_Gartenweg_12.JPG|dewiki|9849507'
if 'continue' not in query_params:
return first_response
elif query_params['gucontinue'] == continue_one:
return second_response
elif query_params['gucontinue'] == continue_two:
return third_response
else:
return None
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty123.json')
) as f:
expect_image_batch = json.load(f)
expect_image_batch.pop('continue')
expect_continue_token = {
'gaicontinue': "20151031230201|Lancelot_'Capability'_BROWN_-_Wilderness_House_Moat_Lane_Hampton_Court_Palace_Hampton_Court_London_KT8_9AR.jpg",
'continue': 'gaicontinue||'
}
monkeypatch.setattr(wmc.delayed_requester, 'get_response_json',
mock_get_response_json)
actual_image_batch, actual_continue_token = wmc._get_image_batch(
'2019-01-01', '2019-01-02'
)
assert actual_image_batch == expect_image_batch
assert actual_continue_token == expect_continue_token
def test_get_image_batch_returns_correctly_without_continue(monkeypatch):
with open(
os.path.join(RESOURCES, 'response_small_missing_continue.json')
) as f:
resp_dict = json.load(f)
with patch.object(
wmc.delayed_requester,
'get_response_json',
return_value=resp_dict
) as mock_response_json:
actual_result, actual_continue = wmc._get_image_batch(
'2019-01-01', '2019-01-02', retries=2
)
expect_result = resp_dict
expect_continue = {}
mock_response_json.assert_called_once()
assert actual_continue == expect_continue
assert actual_result == expect_result
def test_merge_response_jsons():
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty1.json')
) as f:
left_response = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty2.json')
) as f:
right_response = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'wmc_pretty1plus2.json')
) as f:
expect_merged_response = json.load(f)
actual_merged_response = wmc._merge_response_jsons(
left_response,
right_response,
)
assert actual_merged_response == expect_merged_response
def test_merge_image_pages_left_only_with_gu():
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672185_left.json')
) as f:
left_page = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672185_right.json')
) as f:
right_page = json.load(f)
actual_merged_page = wmc._merge_image_pages(left_page, right_page)
assert actual_merged_page == left_page
def test_merge_image_pages_left_only_with_gu_backwards():
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672185_left.json')
) as f:
left_page = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672185_right.json')
) as f:
right_page = json.load(f)
actual_merged_page = wmc._merge_image_pages(right_page, left_page)
assert actual_merged_page == left_page
def test_merge_image_pages_neither_have_gu():
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672210_left.json')
) as f:
left_page = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672210_right.json')
) as f:
right_page = json.load(f)
actual_merged_page = wmc._merge_image_pages(left_page, right_page)
assert actual_merged_page == left_page
def test_merge_image_pages_neither_have_gu_backwards():
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672210_left.json')
) as f:
left_page = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672210_right.json')
) as f:
right_page = json.load(f)
actual_merged_page = wmc._merge_image_pages(right_page, left_page)
assert actual_merged_page == left_page
def test_merge_image_pages_both_have_gu():
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672212_left.json')
) as f:
left_page = json.load(f)
with open(
os.path.join(RESOURCES, 'continuation', 'page_44672212_right.json')
) as f:
right_page = json.load(f)
with open(
os.path.join(
RESOURCES,
'continuation',
'page_44672212_merged.json'
)
) as f:
expect_merged_page = json.load(f)
actual_merged_page = wmc._merge_image_pages(left_page, right_page)
assert actual_merged_page == expect_merged_page
def test_process_image_data_handles_example_dict():
with open(os.path.join(RESOURCES, 'image_data_example.json')) as f:
image_data = json.load(f)
with patch.object(
wmc.image_store,
'add_item',
return_value=1
) as mock_add:
wmc._process_image_data(image_data)
mock_add.assert_called_once_with(
foreign_landing_url='https://commons.wikimedia.org/w/index.php?curid=81754323',
image_url='https://upload.wikimedia.org/wikipedia/commons/2/25/20120925_PlozevetBretagne_LoneTree_DSC07971_PtrQs.jpg',
license_url='https://creativecommons.org/licenses/by-sa/4.0',
foreign_identifier=81754323,
width=5514,
height=3102,
creator='PtrQs',
creator_url='https://commons.wikimedia.org/wiki/User:PtrQs',
title='File:20120925 PlozevetBretagne LoneTree DSC07971 PtrQs.jpg',
meta_data={'description': 'SONY DSC', 'global_usage_count': 0,
'last_modified_at_source': '2019-09-01 00:38:47',
'date_originally_created': '2012-09-25 16:23:02'}
)
def test_process_image_data_throws_out_invalid_mediatype(monkeypatch):
image_data = {'mediatype': 'INVALID'}
def mock_check_mediatype(image_info):
return False
monkeypatch.setattr(wmc, '_check_mediatype', mock_check_mediatype)
with patch.object(
wmc.image_store,
'add_item',
return_value=1
) as mock_add:
wmc._process_image_data(image_data)
mock_add.assert_not_called()
def test_get_image_info_dict():
with open(os.path.join(RESOURCES, 'image_data_example.json')) as f:
image_data = json.load(f)
with open(
os.path.join(RESOURCES, 'image_info_from_example_data.json')
) as f:
expect_image_info = json.load(f)
actual_image_info = wmc._get_image_info_dict(image_data)
assert actual_image_info == expect_image_info
def test_check_mediatype_with_valid_image_info():
with open(
os.path.join(RESOURCES, 'image_info_from_example_data.json')
) as f:
image_info = json.load(f)
valid_mediatype = wmc._check_mediatype(image_info)
assert valid_mediatype is True
def test_check_mediatype_with_invalid_mediatype_in_image_info():
with open(
os.path.join(RESOURCES, 'image_info_from_example_data.json')
) as f:
image_info = json.load(f)
image_info.update(mediatype='INVALIDTYPE')
valid_mediatype = wmc._check_mediatype(image_info)
assert valid_mediatype is False
def test_extract_creator_info_handles_plaintext():
with open(os.path.join(RESOURCES, 'image_info_artist_string.json')) as f:
image_info = json.load(f)
actual_creator, actual_creator_url = wmc._extract_creator_info(image_info)
expect_creator = 'Artist Name'
expect_creator_url = None
assert expect_creator == actual_creator
assert expect_creator_url == actual_creator_url
def test_extract_creator_info_handles_well_formed_link():
with open(os.path.join(RESOURCES, 'image_info_artist_link.json')) as f:
image_info = json.load(f)
actual_creator, actual_creator_url = wmc._extract_creator_info(image_info)
expect_creator = 'link text'
expect_creator_url = 'https://test.com/linkspot'
assert expect_creator == actual_creator
assert expect_creator_url == actual_creator_url
def test_extract_creator_info_handles_div_with_no_link():
with open(os.path.join(RESOURCES, 'image_info_artist_div.json')) as f:
image_info = json.load(f)
actual_creator, actual_creator_url = wmc._extract_creator_info(image_info)
expect_creator = '<NAME>'
expect_creator_url = None
assert expect_creator == actual_creator
assert expect_creator_url == actual_creator_url
def test_extract_creator_info_handles_internal_wc_link():
with open(
os.path.join(RESOURCES, 'image_info_artist_internal_link.json')
) as f:
image_info = json.load(f)
actual_creator, actual_creator_url = wmc._extract_creator_info(image_info)
expect_creator = 'NotaRealUser'
expect_creator_url = 'https://commons.wikimedia.org/w/index.php?title=User:NotaRealUser&action=edit&redlink=1'
assert expect_creator == actual_creator
assert expect_creator_url == actual_creator_url
def test_extract_creator_info_handles_link_as_partial_text():
with open(
os.path.join(RESOURCES, 'image_info_artist_partial_link.json')
) as f:
image_info = json.load(f)
actual_creator, actual_creator_url = wmc._extract_creator_info(image_info)
expect_creator = 'Jeff & Brian from Eastbourne'
expect_creator_url = 'https://www.flickr.com/people/16707908@N07'
assert expect_creator == actual_creator
assert expect_creator_url == actual_creator_url
def test_get_license_url_finds_license_url():
with open(
os.path.join(RESOURCES, 'image_info_from_example_data.json')
) as f:
image_info = json.load(f)
expect_license_url = 'https://creativecommons.org/licenses/by-sa/4.0'
actual_license_url = wmc._get_license_url(image_info)
assert actual_license_url == expect_license_url
def test_get_license_url_handles_missing_license_url():
with open(
os.path.join(RESOURCES, 'image_info_artist_partial_link.json')
) as f:
image_info = json.load(f)
expect_license_url = ''
actual_license_url = wmc._get_license_url(image_info)
assert actual_license_url == expect_license_url
def test_create_meta_data_scrapes_text_from_html_description():
with open(
os.path.join(RESOURCES, 'image_data_html_description.json')
) as f:
image_data = json.load(f)
expect_description = 'Identificatie Titel(s): Allegorie op kunstenaar <NAME>, bekend als Parmigianino'
actual_description = wmc._create_meta_data_dict(image_data)['description']
assert actual_description == expect_description
def test_create_meta_data_tallies_global_usage_count():
with open(
os.path.join(
RESOURCES,
'continuation',
'page_44672185_left.json')
) as f:
image_data = json.load(f)
actual_gu = wmc._create_meta_data_dict(image_data)['global_usage_count']
expect_gu = 3
assert actual_gu == expect_gu
def test_create_meta_data_tallies_zero_global_usage_count():
with open(
os.path.join(
RESOURCES,
'continuation',
'page_44672185_right.json')
) as f:
image_data = json.load(f)
actual_gu = wmc._create_meta_data_dict(image_data)['global_usage_count']
expect_gu = 0
assert actual_gu == expect_gu
|
StarcoderdataPython
|
1817333
|
# internal to the class is
# - the matrix used for calculation
# - the dict mapping Cn to concept name
# - a list of concepts
# - a list of relations
# only add, no delete
#
import numpy as np
import itertools
import json
import networkx as nx
import math
class Concept:
def __init__(self, node, name, wordcloud, type_):
self.id = node
        self.name = name
        self.wordcloud = wordcloud
self.adjacent = {}
self.type = type_ # "input, state, or output"
def __str__(self):
return str(self.id) + ' adjacent: ' + str([x.id for x in self.adjacent])
def add_neighbor(self, neighbor, weight=0):
self.adjacent[neighbor] = weight
def get_connections(self):
return self.adjacent.keys()
def get_name(self):
return self.name
def get_id(self):
return self.id
def get_weight(self, neighbor):
return self.adjacent[neighbor]
def set_type(self, type_):
        if type_ not in ("input", "state", "output"):
            self.type = "state"
            return "ERR: type must be input, output, or state. Setting to 'state'."
        else:
            self.type = type_
def get_type(self):
return self.type
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__)
class Edge:
def __init__(self, id_, from_node, to_node, weight):
self.id = id_
self.from_node = from_node
self.to = to_node
self.weight = weight
class FCM:
def __init__(self):
self.vert_dict = {} # dictionary of concepts
self.connection_dict = {} # dictionary of connections
self.correlation_matrix = [[]] # used for calculations & output
self.ordered_concepts = np.array([])
self.cosmos_connection = "" # if we want to get/set data from cosmos, set a connection
self.squasher = "tanh" # an enum indicating which squasher to use
self.low_bound = 0 # low bound for input values
self.high_bound = 1 # high bound for input values
self.fuzz_range = 2 # number of steps for inputs
self.graph = nx.DiGraph() # internal representation of the graph
def __iter__(self):
return iter(self.vert_dict.values())
def add_concept(self, node):
new_concept = node
self.vert_dict[node.id] = new_concept
self.ordered_concepts = np.append(self.ordered_concepts, [node.id])
self.graph.add_node(node.id)
if len(self.vert_dict) == 1:
self.correlation_matrix = np.append(self.correlation_matrix, [0])
else:
new_h_row = np.tile(0, len(self.vert_dict) - 1)
self.correlation_matrix = np.column_stack((self.correlation_matrix, new_h_row))
new_v_row = np.tile(0, len(self.vert_dict))
self.correlation_matrix = np.vstack((self.correlation_matrix, new_v_row))
return new_concept
# update the graph and correlation matrix
def add_connection(self, from_node, to_node, weight):
from_index = np.where(self.ordered_concepts == from_node.id)
to_index = np.where(self.ordered_concepts == to_node.id)
self.correlation_matrix[from_index[0][0]][to_index[0][0]] = weight
self.graph.add_weighted_edges_from([(from_node.id, to_node.id, weight)])
# return the NetworkX representation of the graph -> https://networkx.org/
def get_graph(self):
return self.graph
# pass in a dictionary of concept ids/values, handle clamping
def calculate(self, input_dict, max_iterations):
        # input_weights is an array of weights used to run the simulation
input_weights = np.tile(0.0,len(self.vert_dict))
# default clamps are false
input_clamps = np.zeros(len(self.vert_dict), dtype=bool)
counter = 0
# order the input weights to correlate to the matrix
for i in self.ordered_concepts:
input_weights[counter] = input_dict.get(i).get("in")
if input_dict.get(i).get("clamp"):
input_clamps[counter] = True
counter = counter + 1
result = []
input_ec = input_weights
result.append(input_ec) # input weights are always the first value
        # if an input is clamped, keep it at its value
for i in range(max_iterations):
input_ec = np.tanh(input_ec - input_ec.dot(self.correlation_matrix))
ec_count = 0
for c in input_ec:
if input_clamps[ec_count]:
input_ec[ec_count] = input_weights[ec_count]
ec_count += 1
result.append(input_ec)
# Check the euclidean distance for the last 3 iterations. If it's less than the stability threshold, stop
stable_threshold = 0.1 # TODO this should be a configuration variable
if len(result) > 3:
stable_check_1 = result[len(result)-1]
stable_check_2 = result[len(result)-2]
stable_check_3 = result[len(result)-3]
dist_1 = math.dist(stable_check_1, stable_check_2)
dist_2 = math.dist(stable_check_1, stable_check_3)
if dist_1 < stable_threshold and dist_2 < stable_threshold:
break
# TODO below
# - update DF to be the output they want...
# -
return result
def set_squasher(self, squasher):
if squasher == "tanh":
self.squasher = "tanh"
elif squasher == "sigmoid":
self.squasher = "sigmoid"
else:
self.squasher = "tanh"
def get_matplotlib_labels(self): # return 2 arrays, one with ordered c numbers, one with corresponding names
c_numbers = []
c_names = []
counter = 0
for i in self.ordered_concepts:
print(i)
current_concept = self.vert_dict.get(i)
c_numbers.append("c" + str(counter))
            c_names.append(current_concept.name)
counter = counter + 1
return {"concept_numbers": c_numbers, "concept_names": c_names}
def output_gremlin(self):
print("gremy")
# return a gremlin query that can be used to save to gremlin
# return json that can be used for d3 graph
def output_d3(self):
print("d3")
d3_json = {}
nodes = []
links = []
correlations = np.array(self.correlation_matrix)
row_counter = 0
for r in correlations:
from_concept = self.vert_dict[self.ordered_concepts[row_counter]]
            nodes.append({"name": from_concept.get_name(), "id": from_concept.get_id()})
col_counter = 0
for c in r:
if c > 0:
to_concept = self.vert_dict[self.ordered_concepts[col_counter]]
links.append({"from": from_concept.get_id(), "to": to_concept.get_id(), "weight": c})
col_counter += 1
row_counter += 1
d3_json["nodes"] = nodes
d3_json["links"] = links
return json.dumps(d3_json)
# run every possible scenario for this model
# (1) find all the possible input values given linguistic inputs
# (2) create the cartesian product to get every possible input
# (3) use these as inputs, run the model a ton
def run_all_possible_scenarios(self, max_iterations):
low_val = self.low_bound
high_val = self.high_bound
fuzz_count = self.fuzz_range
        # initialize list with potential values in range
list_1 = np.linspace(low_val, high_val, num=fuzz_count)
# get the cartesian product to calculate all possible inputs
input_concepts = {}
for i in self.ordered_concepts:
if self.vert_dict.get(i).get_type() == "in":
input_concepts[i] = self.vert_dict.get(i)
print("Input concept count:")
print(str(len(input_concepts)))
print("Total concept count:")
print(str(len(self.vert_dict)))
if len(input_concepts) < 1:
raise Exception("You must have at least one input concept!")
# unique_combinations = list(itertools.product(list_1, repeat=len(self.vert_dict)))
# TODO return an error if there are no concepts marked as input
unique_combinations = list(itertools.product(list_1, repeat=int(len(input_concepts))))
print("Unique Combos")
print(len(unique_combinations))
# unique_combinations_slice = unique_combinations[:100]
# unique_combinations = unique_combinations_slice
# this is a hack to get through for now...
only_five = []
for u in unique_combinations:
five_check = sum(u)
if 0 < five_check < 6:
only_five.append(u)
unique_combinations = only_five
print("Only fives:")
print(len(unique_combinations))
all_inputs = []
for u in unique_combinations:
counter = 0
this_input = {}
for i in self.ordered_concepts:
# if this is an input clamp it
if self.vert_dict.get(i).get_type() == "in":
this_input[i] = { "in": u[counter], "clamp": True }
counter = counter + 1
else:
this_input[i] = { "in": 0, "clamp": False }
all_inputs.append(this_input)
all_possible_outputs = []
for a in all_inputs:
all_possible_outputs.append(self.calculate(a, max_iterations))
return all_possible_outputs
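    # Illustration of steps (1)-(3) above with two input concepts and the defaults
    # low_bound=0, high_bound=1, fuzz_range=2:
    #   np.linspace(0, 1, num=2)                 -> [0.0, 1.0]
    #   itertools.product([0.0, 1.0], repeat=2)  -> (0,0), (0,1), (1,0), (1,1)
    # Each surviving tuple (the "only_five" filter keeps sums strictly between 0
    # and 6) becomes an input_dict: input concepts are clamped to the tuple value,
    # every other concept starts at 0 unclamped, and calculate() is run on each.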
# Import CSV for matrix and for concepts
def create_from_csv(self, correlation_matrix, node_definitions):
# TODO add connections
for node in node_definitions:
c = Concept(node[1], node[0], "", node[2])
new_concept = c
self.vert_dict[c.id] = new_concept
self.ordered_concepts = np.append(self.ordered_concepts, [c.id])
self.graph.add_node(c.id)
self.correlation_matrix = correlation_matrix
def scenario_as_text(self):
print("hm")
# convert
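# Minimal usage sketch (illustrative; the concept ids/names below are made up):
if __name__ == "__main__":
    fcm = FCM()
    c1 = fcm.add_concept(Concept("c1", "rainfall", "", "input"))
    c2 = fcm.add_concept(Concept("c2", "flooding", "", "output"))
    fcm.add_connection(c1, c2, 0.8)
    # clamp the input concept at 1.0 and let the output concept evolve
    result = fcm.calculate(
        {"c1": {"in": 1.0, "clamp": True}, "c2": {"in": 0.0, "clamp": False}},
        max_iterations=10,
    )
    print(result[-1])  # activation vector of the final (or stabilised) iteration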
|
StarcoderdataPython
|
9713586
|
from ..rfc6749 import TokenEndpoint
from ..rfc6749 import (
OAuth2Error, InvalidRequestError, UnsupportedTokenTypeError
)
class RevocationEndpoint(TokenEndpoint):
"""Implementation of revocation endpoint which is described in
`RFC7009`_.
.. _RFC7009: https://tools.ietf.org/html/rfc7009
"""
#: Endpoint name to be registered
ENDPOINT_NAME = 'revocation'
def validate_endpoint_request(self):
"""The client constructs the request by including the following
parameters using the "application/x-www-form-urlencoded" format in
the HTTP request entity-body:
token
REQUIRED. The token that the client wants to get revoked.
token_type_hint
OPTIONAL. A hint about the type of the token submitted for
revocation.
"""
if self.request.body_params:
params = dict(self.request.body_params)
else:
params = dict(self.request.query_params)
if 'token' not in params:
raise InvalidRequestError()
token_type = params.get('token_type_hint')
if token_type and token_type not in self.SUPPORTED_TOKEN_TYPES:
raise UnsupportedTokenTypeError()
token = self.query_token(
params['token'], token_type, self.request.client)
if not token:
raise InvalidRequestError()
self.request.credential = token
def create_endpoint_response(self):
"""Validate revocation request and create the response for revocation.
For example, a client may request the revocation of a refresh token
with the following request::
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
:returns: (status_code, body, headers)
"""
try:
# The authorization server first validates the client credentials
self.authenticate_endpoint_client()
# then verifies whether the token was issued to the client making
# the revocation request
self.validate_endpoint_request()
# the authorization server invalidates the token
self.revoke_token(self.request.credential)
self.server.send_signal(
'after_revoke_token',
token=self.request.credential,
client=self.request.client,
)
status = 200
body = {}
headers = [
('Content-Type', 'application/json'),
('Cache-Control', 'no-store'),
('Pragma', 'no-cache'),
]
except OAuth2Error as error:
status = error.status_code
body = dict(error.get_body())
headers = error.get_headers()
return status, body, headers
def query_token(self, token, token_type_hint, client):
"""Get the token from database/storage by the given token string.
Developers should implement this method::
def query_token(self, token, token_type_hint, client):
if token_type_hint == 'access_token':
return Token.query_by_access_token(token, client.client_id)
if token_type_hint == 'refresh_token':
return Token.query_by_refresh_token(token, client.client_id)
return Token.query_by_access_token(token, client.client_id) or \
Token.query_by_refresh_token(token, client.client_id)
"""
raise NotImplementedError()
def revoke_token(self, token):
"""Mark token as revoked. Since token MUST be unique, it would be
dangerous to delete it. Consider this situation:
1. Jane obtained a token XYZ
2. Jane revoked (deleted) token XYZ
3. Bob generated a new token XYZ
4. Jane can use XYZ to access Bob's resource
It would be secure to mark a token as revoked::
def revoke_token(self, token):
token.revoked = True
session.add(token)
session.commit()
"""
raise NotImplementedError()
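# Minimal wiring sketch (illustrative and framework-agnostic; ``token_store`` is a
# hypothetical storage helper, not part of this module). A concrete endpoint only
# supplies the two hooks above, and the hosting server turns the
# (status_code, body, headers) triple from create_endpoint_response() into an
# actual HTTP response.
#
#     class MyRevocationEndpoint(RevocationEndpoint):
#         def query_token(self, token, token_type_hint, client):
#             return token_store.find(token, token_type_hint, client.client_id)
#
#         def revoke_token(self, token):
#             token.revoked = True
#             token_store.save(token)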
|
StarcoderdataPython
|
3218482
|
"""
Provide a mock binary sensor platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
device_class: MockBinarySensor(
name=f"{device_class} sensor",
is_on=True,
unique_id=f"unique_{device_class}",
device_class=device_class,
)
for device_class in DEVICE_CLASSES
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockBinarySensor(MockEntity, BinarySensorEntity):
"""Mock Binary Sensor class."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._handle("is_on")
@property
def device_class(self):
"""Return the class of this sensor."""
return self._handle("device_class")
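# Usage sketch (illustrative): a test resets this platform before exercising it.
#   init()            # one MockBinarySensor per device class in DEVICE_CLASSES
#   init(empty=True)  # or start with no entities at all (ENTITIES == {})
# async_setup_platform then adds whatever is currently in ENTITIES.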
|
StarcoderdataPython
|
5174026
|
import math
print(math.ceil(3.9))
print(math.floor(3.9))
x = 3.9
print(round(x))
print(abs(-3.9))
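# Expected output of the calls above:
#   math.ceil(3.9)  -> 4
#   math.floor(3.9) -> 3
#   round(3.9)      -> 4
#   abs(-3.9)       -> 3.9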
|
StarcoderdataPython
|
5121890
|
<filename>scripts/pyautogui-full-vs-region.py
#!/usr/bin/env python
"""This file illustrates the similar run time between regions and full screenshots"""
from __future__ import print_function
import time
import pyautogui
print('Using a region')
START = time.time()
SCREEN_WIDTH, SCREEN_HEIGHT = pyautogui.size()
MOUSE_X, MOUSE_Y = pyautogui.position()
PIXEL = pyautogui.screenshot(
region=(
MOUSE_X, MOUSE_Y, 1, 1
)
)
COLOR = PIXEL.getcolors()
END = time.time()
print("Screen: %dx%d" % (SCREEN_WIDTH, SCREEN_HEIGHT))
print("Mouse: (%d,%d)" % (MOUSE_X, MOUSE_Y))
print("RGB: %s" % (COLOR[0][1].__str__()))
print("Start: %s" % (START))
print("End: %s" % (END))
print("Difference: %s" % (END - START))
print("=" * 10)
print('Full screen')
START = time.time()
SCREEN_WIDTH, SCREEN_HEIGHT = pyautogui.size()
MOUSE_X, MOUSE_Y = pyautogui.position()
PIXEL = pyautogui.screenshot()
COLOR = PIXEL.getcolors()
END = time.time()
print("Screen: %dx%d" % (SCREEN_WIDTH, SCREEN_HEIGHT))
print("Mouse: (%d,%d)" % (MOUSE_X, MOUSE_Y))
print("Start: %s" % (START))
print("End: %s" % (END))
print("Difference: %s" % (END - START))
|
StarcoderdataPython
|
11242990
|
<reponame>DEvHiII/aoc-2018
import re
import datetime
class Parser:
# [1518-11-01 00:00] Guard #10 begins shift
# [1518-11-01 00:05] falls asleep
# [1518-11-01 00:25] wakes up
def parse(self, line):
expression = '^\\[([-0-9: ]+)\\] (Guard #([0-9]+) begins shift|falls asleep|wakes up)'
entry = re.search(expression, line)
if (entry.group(2) == "falls asleep"):
type = "asleep"
elif (entry.group(2) == "wakes up"):
type = "wakeup"
else:
type = "start"
if (entry.group(3) is None):
guardNumber = -1
else:
guardNumber = int(entry.group(3))
return [
datetime.datetime.strptime(entry.group(1), '%Y-%m-%d %H:%M'),
type,
guardNumber
]
def readLog(self, fileName):
def sortFunc(e):
return e[0]
        with open(fileName) as f:
            lines = f.readlines()
        log = []
        for line in lines:
            log.append(self.parse(line))
log.sort(key=sortFunc)
return log
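# Usage sketch (illustrative; 'input.txt' is a placeholder file name):
if __name__ == "__main__":
    parser = Parser()
    # a single line parses into [timestamp, event type, guard number]
    print(parser.parse("[1518-11-01 00:00] Guard #10 begins shift"))
    # -> [datetime.datetime(1518, 11, 1, 0, 0), 'start', 10]
    # log = parser.readLog("input.txt")  # full log, sorted chronologically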
|
StarcoderdataPython
|
193620
|
<reponame>numb3r33/Kaggle_Home_Credit<filename>src/v71.py
import pandas as pd
import numpy as np
import scipy as sp
import argparse
import os
import gc
import time
from base import *
from features import *
from datetime import datetime
from sklearn.externals import joblib
from sklearn.model_selection import cross_val_score, StratifiedKFold
basepath = os.path.expanduser('../')
SEED = 1231
np.random.seed(SEED)
#############################################################################################################
# EXPERIMENT PARAMETERS #
#############################################################################################################
COLS_TO_REMOVE = ['SK_ID_CURR',
'TARGET',
'OCCUPATION_TYPE__5',
'OCCUPATION_TYPE__-1',
'OCCUPATION_TYPE__11',
'OCCUPATION_TYPE__15',
'ORGANIZATION_TYPE__29',
'ORGANIZATION_TYPE__5',
'FLAG_OWN_REALTY',
'FLAG_DOCUMENT_21',
'ORGANIZATION_TYPE__21',
'FLAG_DOCUMENT_14',
'ORGANIZATION_TYPE__17',
'ORGANIZATION_TYPE__27',
'ORGANIZATION_TYPE__32',
'FLAG_DOCUMENT_16',
'ORGANIZATION_TYPE__47',
'FLAG_DOCUMENT_13',
'FLAG_DOCUMENT_11',
'ORGANIZATION_TYPE__40',
'ORGANIZATION_TYPE__23',
'ORGANIZATION_TYPE__14',
'diff_max_min_credit_term',
'ORGANIZATION_TYPE__1',
'ORGANIZATION_TYPE__9',
'OCCUPATION_TYPE__nan',
'ORGANIZATION_TYPE__41',
'OCCUPATION_TYPE__7',
'FLAG_MOBIL',
'ORGANIZATION_TYPE__18',
'ORGANIZATION_TYPE__38',
'ORGANIZATION_TYPE__44',
'FLAG_DOCUMENT_12',
'ORGANIZATION_TYPE__0',
'FLAG_DOCUMENT_2',
'ORGANIZATION_TYPE__13',
'OCCUPATION_TYPE__0',
'FLAG_DOCUMENT_4',
'OCCUPATION_TYPE__16',
'ORGANIZATION_TYPE__49',
'FLAG_DOCUMENT_6',
'FLAG_DOCUMENT_9',
'ORGANIZATION_TYPE__nan',
'OCCUPATION_TYPE__12',
'ORGANIZATION_TYPE__20',
'FLAG_CONT_MOBILE',
'ORGANIZATION_TYPE__37',
'ORGANIZATION_TYPE__45',
'FLAG_EMP_PHONE',
'FLAG_DOCUMENT_17',
'LIVE_REGION_NOT_WORK_REGION',
'OCCUPATION_TYPE__17',
'NAME_TYPE_SUITE',
'ORGANIZATION_TYPE__15',
'REG_REGION_NOT_LIVE_REGION',
'FLAG_DOCUMENT_10',
'ORGANIZATION_TYPE__3',
'OCCUPATION_TYPE__2',
'ORGANIZATION_TYPE__19',
'FLAG_DOCUMENT_19',
'AMT_REQ_CREDIT_BUREAU_DAY',
'credits_ended_bureau',
'ORGANIZATION_TYPE__8',
'ORGANIZATION_TYPE__16',
'FLAG_DOCUMENT_8',
'ORGANIZATION_TYPE__25',
'OCCUPATION_TYPE__6',
'NUM_NULLS_EXT_SCORES',
'ORGANIZATION_TYPE__48',
'ORGANIZATION_TYPE__53',
'ORGANIZATION_TYPE__10',
'FLAG_DOCUMENT_7',
'ORGANIZATION_TYPE__55',
'ORGANIZATION_TYPE__24',
'NAME_EDUCATION_TYPE__0',
'ORGANIZATION_TYPE__46',
'ELEVATORS_MODE',
'NAME_EDUCATION_TYPE__nan',
'ORGANIZATION_TYPE__22',
'ORGANIZATION_TYPE__50',
'REG_REGION_NOT_WORK_REGION',
'ORGANIZATION_TYPE__56',
'FLAG_DOCUMENT_5',
'FLAG_DOCUMENT_20',
'ORGANIZATION_TYPE__2',
'ORGANIZATION_TYPE__6',
'OCCUPATION_TYPE__13',
'ORGANIZATION_TYPE__52',
'FLAG_DOCUMENT_15',
'ORGANIZATION_TYPE__43',
'AMT_REQ_CREDIT_BUREAU_HOUR',
'NAME_HOUSING_TYPE',
'ORGANIZATION_TYPE__11',
'HOUSETYPE_MODE',
'EMERGENCYSTATE_MODE',
'ORGANIZATION_TYPE__28',
'NAME_EDUCATION_TYPE__2',
'ORGANIZATION_TYPE__4',
'OCCUPATION_TYPE__14',
'ORGANIZATION_TYPE__35',
'LIVE_CITY_NOT_WORK_CITY',
'num_diff_credits',
'ORGANIZATION_TYPE__51',
'REG_CITY_NOT_WORK_CITY',
'FLAG_EMAIL',
'ORGANIZATION_TYPE__57',
'NAME_HOUSING_TYPE__0',
'NAME_INCOME_TYPE__2',
'NAME_INCOME_TYPE__5',
'NAME_HOUSING_TYPE__nan',
'NAME_INCOME_TYPE__nan',
'NAME_INCOME_TYPE__0',
'NAME_INCOME_TYPE__6',
'NAME_CONTRACT_STATUS_3',
'NAME_INCOME_TYPE__3',
'diff_balance_curr_credit',
'ratio_min_installment_balance',
'NAME_HOUSING_TYPE__4',
'CODE_REJECT_REASON_5',
'CODE_REJECT_REASON_8',
'ORGANIZATION_TYPE__33',
'CODE_REJECT_REASON_0',
'OCCUPATION_TYPE__1',
'NAME_HOUSING_TYPE__5',
'sum_num_times_prolonged',
'NAME_GOODS_CATEGORY_13',
'NAME_GOODS_CATEGORY_4',
'NAME_GOODS_CATEGORY_26',
'PRODUCT_COMBINATION_-1',
'NAME_GOODS_CATEGORY_24',
'NAME_GOODS_CATEGORY_15',
'NAME_GOODS_CATEGORY_20',
'NAME_GOODS_CATEGORY_9',
'CODE_REJECT_REASON_6',
'NAME_GOODS_CATEGORY_6',
'NAME_GOODS_CATEGORY_0',
'num_high_int_no_info_loans',
'NAME_HOUSING_TYPE__2',
'NAME_GOODS_CATEGORY_14',
'NAME_GOODS_CATEGORY_17',
'PRODUCT_COMBINATION_16',
'PRODUCT_COMBINATION_15',
'OCCUPATION_TYPE__10',
'PRODUCT_COMBINATION_14',
'NAME_GOODS_CATEGORY_1',
'NAME_GOODS_CATEGORY_12',
'NAME_GOODS_CATEGORY_21',
'NAME_GOODS_CATEGORY_25',
'OCCUPATION_TYPE__9',
'NAME_GOODS_CATEGORY_10',
'NAME_GOODS_CATEGORY_16',
'NAME_GOODS_CATEGORY_8',
'mean_CODE_GENDER_ORGANIZATION_TYPE_DAYS_REGISTRATION',
'FLAG_DOCUMENT_18',
'NAME_GOODS_CATEGORY_18',
'ORGANIZATION_TYPE__30',
'sum_CODE_GENDER_NAME_EDUCATION_TYPE_OWN_CAR_AGE',
'ORGANIZATION_TYPE__12',
'NAME_EDUCATION_TYPE__3',
'ORGANIZATION_TYPE__36',
'ORGANIZATION_TYPE__34'
]
PARAMS = {
'num_boost_round': 5000,
'early_stopping_rounds': 200,
'boosting_type': 'gbdt',
'objective': 'binary',
'learning_rate': .02,
'metric': 'auc',
'max_depth': 8,
'num_leaves': 35,
'sub_feature': .1,
'feature_fraction_seed': SEED,
'min_data_in_leaf': 100,
'max_bin': 300,
'lambda_l2': 100,
'nthread': 4,
'verbose': -1,
'seed': SEED
}
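# Illustrative note (the actual training loop lives in BaseModel, which is not
# shown here): a dict shaped like PARAMS is usually split so that the control
# keys go to lgb.train directly and the rest become booster parameters, e.g.
# (dtrain/dval stand in for lightgbm Datasets built elsewhere):
#
#     import lightgbm as lgb
#     params = dict(PARAMS)
#     num_round = params.pop('num_boost_round')
#     early_stop = params.pop('early_stopping_rounds')
#     model = lgb.train(params, dtrain, num_boost_round=num_round,
#                       valid_sets=[dval], early_stopping_rounds=early_stop)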
MODEL_FILENAME = 'v71'
SAMPLE_SIZE = .5
# NOTE: a column listed in FREQ_ENCODING_COLS
# must not also appear in OHE_COLS.
FREQ_ENCODING_COLS = ['ORGANIZATION_OCCUPATION',
'age_emp_categorical',
'age_occupation'
]
OHE_COLS = [
'ORGANIZATION_TYPE',
'OCCUPATION_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_HOUSING_TYPE',
'NAME_INCOME_TYPE'
]
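# Sanity check (added for illustration) enforcing the note above: the two
# encoding column lists must stay disjoint.
assert set(FREQ_ENCODING_COLS).isdisjoint(OHE_COLS), \
    'FREQ_ENCODING_COLS and OHE_COLS must not overlap'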
class Modelv71(BaseModel):
def __init__(self, **params):
self.params = params
self.n_train = 307511 # TODO: find a way to remove this constant
def load_data(self, filenames):
dfs = []
for filename in filenames:
dfs.append(pd.read_csv(filename, parse_dates=True, keep_date_col=True))
df = pd.concat(dfs)
df.index = np.arange(len(df))
df = super(Modelv71, self).reduce_mem_usage(df)
return df
def preprocess(self):
tr = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_train.pkl'))
te = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_test.pkl'))
ntrain = len(tr)
data = pd.concat((tr, te))
del tr, te
gc.collect()
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl')):
print('Generating features based on current application ....')
t0 = time.clock()
data, FEATURE_NAMES = current_application_features(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on current application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_features(bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on bureau application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
bureau_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in bureau_bal.select_dtypes(include=['category']).columns:
bureau_bal.loc[:, col] = bureau_bal.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau and bureau balance ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_and_balance(bureau, bureau_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on bureau and balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
print('Generating features based on previous application ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_features(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl')):
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
print('Generating features based on pos cash ....')
t0 = time.clock()
data, FEATURE_NAMES = pos_cash_features(pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del pos_cash
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on pos cash')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl')):
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Credit Card ....')
t0 = time.clock()
data, FEATURE_NAMES = credit_card_features(credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del credit_bal
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Credit Card')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl')):
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Installments ....')
t0 = time.clock()
data, FEATURE_NAMES = get_installment_features(installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del installments
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Installments')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Bureau Applications....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_bureau(prev_app, bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del bureau, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Bureau Applications')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Credit card balance ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_credit_card(prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del credit_bal, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Credit card balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Installment Payments ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_installments(prev_app, installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del installments, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Installment Payments.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on loan stacking ....')
t0 = time.clock()
data, FEATURE_NAMES = loan_stacking(bureau, prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on loan stacking.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl')):
print('Generating features based on feature groups ....')
t0 = time.clock()
data, FEATURE_NAMES = feature_groups(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on feature groups.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl')):
print('Generating features based on previous application and pos cash ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
t0 = time.clock()
data, FEATURE_NAMES = prev_app_pos(prev_app, pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application and pos cash.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl')):
print('Generating features based on previous application, pos cash and credit card balance ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_pos_credit(prev_app, pos_cash, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application, pos cash and credit card balance.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl')):
print('Generating features based on previous application one hot encoded features ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_ohe(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application one hot encode features.')
def prepare_features(self):
tr = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_train.pkl'))
te = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_test.pkl'))
ntrain = len(tr)
data = pd.concat((tr, te))
del tr, te
gc.collect()
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl')):
print('Generating features based on current application ....')
t0 = time.clock()
data, FEATURE_NAMES = current_application_features(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on current application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_features(bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on bureau application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
bureau_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in bureau_bal.select_dtypes(include=['category']).columns:
bureau_bal.loc[:, col] = bureau_bal.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau and bureau balance ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_and_balance(bureau, bureau_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on bureau and balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
print('Generating features based on previous application ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_features(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl')):
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
print('Generating features based on pos cash ....')
t0 = time.clock()
data, FEATURE_NAMES = pos_cash_features(pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del pos_cash
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on pos cash')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl')):
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Credit Card ....')
            t0 = time.time()
data, FEATURE_NAMES = credit_card_features(credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del credit_bal
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on Credit Card')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl')):
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Installments ....')
            t0 = time.time()
data, FEATURE_NAMES = get_installment_features(installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del installments
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on Installments')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Bureau Applications....')
            t0 = time.time()
data, FEATURE_NAMES = prev_app_bureau(prev_app, bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del bureau, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on Previous application and Bureau Applications')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Credit card balance ....')
            t0 = time.time()
data, FEATURE_NAMES = prev_app_credit_card(prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del credit_bal, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on Previous application and Credit card balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Installment Payments ....')
            t0 = time.time()
data, FEATURE_NAMES = prev_app_installments(prev_app, installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
del installments, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on Previous application and Installment Payments.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on loan stacking ....')
            t0 = time.time()
data, FEATURE_NAMES = loan_stacking(bureau, prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on loan stacking.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl')):
print('Generating features based on feature groups ....')
            t0 = time.time()
data, FEATURE_NAMES = feature_groups(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on feature groups.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl')):
print('Generating features based on previous application and pos cash ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
            t0 = time.time()
data, FEATURE_NAMES = prev_app_pos(prev_app, pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_test.pkl'))
            print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application and pos cash.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl')):
print('Generating features based on previous application, pos cash and credit card balance ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_pos_credit(prev_app, pos_cash, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application, pos cash and credit card balance.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl')):
print('Generating features based on previous application one hot encoded features ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_ohe(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv71, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application one hot encode features.')
    # This method loads the engineered feature groups from disk and concatenates them
    # column-wise into the train and test dataframes (data) that are consumed by the
    # downstream layers.
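    # For illustration (summarising the code below, not part of the original comment):
    # each prefix returned by get_filenames() is expected to map to a pair of pickles
    # written earlier in the pipeline, e.g. 'bureau_' -> feature_groups/bureau_train.pkl
    # and feature_groups/bureau_test.pkl, and the per-group frames are then
    # concatenated column-wise (axis=1) into one train and one test dataframe.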
def merge_datasets(self):
def get_filenames():
filenames = [f'application_',
f'current_application_',
f'bureau_',
f'bureau_bal_',
f'prev_app_',
f'pos_cash_',
f'credit_',
f'installments_',
f'prev_app_bureau_',
f'prev_app_credit_',
f'prev_app_installments_',
f'loan_stacking_',
f'feature_groups_',
f'prev_app_pos_cash_',
f'prev_app_pos_cash_credit_bal_',
f'prev_app_ohe_'
]
return filenames
train = []
test = []
filenames = get_filenames()
for filename_ in filenames:
tmp = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'{filename_}train.pkl'))
tmp.index = np.arange(len(tmp))
train.append(tmp)
for filename_ in filenames:
tmp = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'{filename_}test.pkl'))
tmp.index = np.arange(len(tmp))
test.append(tmp)
return pd.concat(train, axis=1), pd.concat(test, axis=1)
def feature_interaction(self, data, key, agg_feature, agg_func, agg_func_name):
key_name = '_'.join(key)
tmp = data.groupby(key)[agg_feature].apply(agg_func)\
.reset_index()\
.rename(columns={agg_feature: f'{agg_func_name}_{key_name}_{agg_feature}'})
data.loc[:, f'{agg_func_name}_{key_name}_{agg_feature}'] = data.loc[:, key]\
.merge(tmp, on=key, how='left')[f'{agg_func_name}_{key_name}_{agg_feature}']
return data
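    # A minimal worked example of feature_interaction (toy values, illustration only):
    # with key=['CODE_GENDER'], agg_feature='AMT_CREDIT', agg_func=np.mean and
    # agg_func_name='mean', a frame containing
    #     CODE_GENDER  AMT_CREDIT
    #     F            100
    #     F            300
    #     M            500
    # gains a new column 'mean_CODE_GENDER_AMT_CREDIT' = [200.0, 200.0, 500.0]:
    # the group-level aggregate is merged back onto every row via the key columns.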
def get_features(self, train, test, compute_ohe):
data = pd.concat((train, test))
data.index = np.arange(len(data))
for col in data.select_dtypes(include=['category']).columns:
data[col] = data[col].cat.codes
        # TODO: not very happy with the way we are computing interactions,
        # because if we omit any of these features from the pipeline it would
        # still work, but the interaction would most likely be a column of all null values.
# concatenate OCCUPATION TYPE AND ORGANIZATION TYPE
        data.loc[:, 'ORGANIZATION_OCCUPATION'] = pd.factorize(data.ORGANIZATION_TYPE.astype(str) +
                                                               data.OCCUPATION_TYPE.astype(str))[0]
# interaction between total debt to income and (annuity / credit)
data.loc[:, 'debt_income_to_annuity_credit'] = data.total_debt_to_income / data.ratio_annuity_credit
# interaction between days birth and ratio of annuity to credit
data.loc[:, 'add_days_birth_annuity_credit'] = data.DAYS_BIRTH + data.ratio_annuity_credit
# interaction between ratio of annuity to credit with external source 2 score
data.loc[:, 'mult_annuity_credit_ext_source_2'] = data.ratio_annuity_credit * data.EXT_SOURCE_2
data.loc[:, 'ratio_annuity_credit_ext_source_2'] = data.ratio_annuity_credit / data.EXT_SOURCE_2.map(np.log1p)
data.loc[:, 'mult_annuity_credit_ext_source_1'] = data.ratio_annuity_credit * data.EXT_SOURCE_1
data.loc[:, 'ratio_annuity_credit_ext_source_1'] = data.ratio_annuity_credit / data.EXT_SOURCE_1.map(np.log1p)
data.loc[:, 'mult_annuity_credit_ext_source_3'] = data.ratio_annuity_credit * data.EXT_SOURCE_3
data.loc[:, 'ratio_annuity_credit_ext_source_3'] = data.ratio_annuity_credit / data.EXT_SOURCE_3.map(np.log1p)
# interaction between ratio of annuity to credit with total amount paid in installments
data.loc[:, 'mult_annuity_credit_amt_payment_sum'] = data.ratio_annuity_credit * data.AMT_PAYMENT_sum
# interaction between total amount paid in installments and delay in installments
data.loc[:, 'mult_amt_payment_sum_delay_installment'] = data.AMT_PAYMENT_sum * data.delay_in_installment_payments
# interaction between credit / annuity and age
data.loc[:, 'diff_credit_annuity_age'] = (data.AMT_CREDIT / data.AMT_ANNUITY) - (-data.DAYS_BIRTH / 365)
# interaction between ext_3 and age
data.loc[:, 'ext_3_age'] = data.EXT_SOURCE_3 * (-data.DAYS_BIRTH / 365)
# interaction between ext_2 and age
data.loc[:, 'ext_2_age'] = data.EXT_SOURCE_2 * (-data.DAYS_BIRTH / 365)
# interaction between rate and external source 2
data.loc[:, 'add_rate_ext_2'] = (data.AMT_CREDIT / data.AMT_ANNUITY) + data.EXT_SOURCE_2
# interaction between rate and age
data.loc[:, 'add_rate_age'] = (data.AMT_CREDIT / data.AMT_ANNUITY) + (-data.DAYS_BIRTH / 365)
# interaction between age and employed and external score 2
data.loc[:, 'add_mult_age_employed_ext_2'] = ((-data.DAYS_BIRTH / 365) +\
(-data.DAYS_EMPLOYED.replace({365243: np.nan}))) *\
(data.EXT_SOURCE_2)
        # combine ratio annuity credit, region population relative and ext source 2
data.loc[:, 'rate_annuity_region_ext_source_2'] = data.ratio_annuity_credit * data.REGION_POPULATION_RELATIVE * data.EXT_SOURCE_2
data.loc[:, 'region_ext_source_3'] = data.REGION_POPULATION_RELATIVE * data.EXT_SOURCE_3
# Relationship between AMT_REQ_CREDIT_BUREAU_HOUR and AMT_REQ_CREDIT_BUREAU_YEAR
data.loc[:, 'ratio_check_hour_to_year'] = data.AMT_REQ_CREDIT_BUREAU_HOUR.div(data.AMT_REQ_CREDIT_BUREAU_YEAR)
# Relationship between Income and ratio annuity credit
data.loc[:, 'mult_ratio_income'] = (data.ratio_annuity_credit * data.AMT_INCOME_TOTAL).map(np.log1p)
data.loc[:, 'div_ratio_income'] = (data.AMT_INCOME_TOTAL / data.ratio_annuity_credit).map(np.log1p)
# Gender, Education and other features
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.var, 'var')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'OWN_CAR_AGE', np.max, 'max')
data = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'OWN_CAR_AGE', np.sum, 'sum')
# Gender, Occupation and Ext scores
data = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
# Gender, Organization and other features
data = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'DAYS_REGISTRATION', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
        # Gender, Reg city not work city and other features
data = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'AMT_ANNUITY', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'CNT_CHILDREN', np.mean, 'mean')
data = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_ID_PUBLISH', np.mean, 'mean')
# Income, Occupation and Ext Score
data = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
# Occupation and Organization and Ext Score
data = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
# Income, Education and Ext score
data = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
# Education and Occupation and other features
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'OWN_CAR_AGE', np.mean, 'mean')
# Education, Occupation, Reg city not work city and other features
data = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'EXT_SOURCE_2', np.mean, 'mean')
# Occupation and other features
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'CNT_CHILDREN', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'CNT_FAM_MEMBERS', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
# frequency encoding of some of the categorical variables.
data = frequency_encoding(data, FREQ_ENCODING_COLS)
# one hot encoding of some of the categorical variables controlled by a flag
# if flag is True then one hot encoding else do frequency encoding.
if compute_ohe:
data = super(Modelv71, self).prepare_ohe(data, OHE_COLS, drop_col=True)
else:
data = frequency_encoding(data, OHE_COLS)
return data
    # This method performs feature engineering on the merged datasets.
def fe(self, train, test, compute_ohe=True):
original_train = train.copy()
data = self.get_features(original_train, test, compute_ohe)
train = data.iloc[:len(train)]
test = data.iloc[len(train):]
del data, original_train
gc.collect()
return train, test
    # This method calls the base class trainer with X, y, Xte and yte in the right format
    # and returns a trained model which can be dumped to disk for further use.
# TODO: Find out why we are not able to load back model from disk and generate correct predictions
# there seems to be some issue in it right now.
def train(self, train, test, feature_list, is_eval, TARGET_NAME='TARGET', **params):
X = train.loc[:, feature_list]
y = train.loc[:, TARGET_NAME]
Xte = test.loc[:, feature_list]
yte = []
if is_eval:
yte = test.loc[:, TARGET_NAME]
return super(Modelv71, self).train_lgb(X, y, Xte, yte, **params)
    # This method takes a model and a test dataset, returns predictions and
    # prints the AUC on the test dataset in the process.
def evaluate(self, test, feature_list, is_eval, model, TARGET_NAME='TARGET'):
Xte = test.loc[:, feature_list]
yte = []
if is_eval:
yte = test.loc[:, TARGET_NAME]
return super(Modelv71, self).evaluate_lgb(Xte, yte, model)
def cross_validate(self, train, feature_list, params, TARGET_NAME='TARGET'):
Xtr = train.loc[:, feature_list]
ytr = train.loc[:, TARGET_NAME]
return super(Modelv71, self).cross_validate(Xtr, ytr, params)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Home Credit Default Risk Solution')
parser.add_argument('-input_path', help='Path to input directory') # path to raw files
parser.add_argument('-output_path', help='Path to output directory') # path to working data folder
parser.add_argument('-data_folder', help='Folder name of the dataset') # dataset folder name
parser.add_argument('-p', type=bool, help='Preprocess')
parser.add_argument('-cv', type=bool, help='Cross Validation')
parser.add_argument('-v', type=str, help='Validation')
parser.add_argument('-features', type=bool, help='Generate Features')
parser.add_argument('-s', type=bool, help='Whether to work on a sample or not.')
parser.add_argument('-seed', type=int, help='Random SEED')
parser.add_argument('-cv_seed', type=int, help='CV SEED')
parser.add_argument('-t', type=bool, help='Full Training Loop.')
    parser.add_argument('-ensemble', type=bool, help='Average out predictions.')
args = parser.parse_args()
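    # Example invocations (hypothetical script name, for illustration only):
    #     python model_v71.py -p True -input_path data/raw/ -output_path data/processed/
    #     python model_v71.py -features True -input_path data/raw/ -output_path data/processed/
    #     python model_v71.py -cv True -seed 1234 -data_folder v71/ -input_path data/raw/ -output_path data/processed/
    # Note: argparse's type=bool treats any non-empty string (including 'False') as
    # True, so these flags switch a mode on whenever a value is passed and must be
    # omitted entirely to keep that mode off.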
if args.p:
print('Preprocessing ...')
input_path = args.input_path
output_path = args.output_path
params = {
'input_path': input_path,
'output_path': output_path
}
m = Modelv71(**params)
m.preprocess()
elif args.features:
print('Generating features ...')
print()
input_path = args.input_path
output_path = args.output_path
params = {
'input_path': input_path,
'output_path': output_path,
}
m = Modelv71(**params)
m.prepare_features()
elif args.v is not None and len(args.v):
print('Train and generate predictions on a fold')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
fold_indicator = args.v
is_sample = args.s
cv_seed = args.cv_seed
SEED = int(args.seed)
print('*' * 100)
print('SEED FOUND: {}'.format(SEED))
params = {
'input_path': input_path,
'output_path': output_path
}
PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{cv_seed}_params.pkl'))
        # Set the seed fields in PARAMS
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
print('*' * 100)
print('PARAMS: {}'.format(PARAMS))
m = Modelv71(**params)
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
itr = pd.read_csv(os.path.join(basepath, input_path + 'cv_idx.csv'), usecols=[fold_indicator])[fold_indicator].values
print('Shape of fold indices ', len(itr))
ite = np.array(list(set(np.arange(m.n_train)) - set(itr)))
train = data.iloc[:m.n_train].iloc[itr]
test = data.iloc[:m.n_train].iloc[ite]
del data
gc.collect()
if is_sample:
print('*' * 100)
print('Take a random sample of the training data ...')
train = train.sample(frac=SAMPLE_SIZE)
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
# print features with null percentage
print('Top-5 features with highest percentage of null values ...\n')
print((train.loc[:, feature_list].isnull().sum() / len(train)).sort_values(ascending=False).iloc[:5])
# print number of features explored in the experiment
print('*' * 100)
print('Number of features: {}'.format(len(feature_list)))
print('*' * 100)
model_identifier = f'{data_folder}{MODEL_FILENAME}_{fold_indicator}_{SEED}'
if os.path.exists(os.path.join(basepath, output_path + f'{model_identifier}_model.txt')):
print('Loading model from disk ...')
model = lgb.Booster(model_file=os.path.join(basepath, output_path + f'{model_identifier}_model.txt'))
yhold = test.TARGET
hold_preds = np.array(model.predict(test.loc[:, feature_list]))
print('AUC score: {}'.format(roc_auc_score(yhold, hold_preds)))
else:
print('Saving model to disk ...')
# train model
model, feat_df = m.train(train, test, feature_list, is_eval=True, **PARAMS)
if not is_sample:
model.save_model(os.path.join(basepath, output_path + f'{model_identifier}_model.txt'))
if not os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_true_holdout.npy')):
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_true_holdout.npy'), test.TARGET)
hold_preds = model.predict(test.loc[:, feature_list])
np.save(os.path.join(basepath, output_path + f'{model_identifier}_preds_holdout.npy'), hold_preds)
feat_df.to_csv(os.path.join(basepath, output_path + f'{model_identifier}_feat_imp.csv'), index=False)
elif args.cv:
print('Cross validation on training and store parameters and cv score on disk ...')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
is_sample = args.s
SEED = args.seed
params = {
'input_path': input_path,
'output_path': output_path
}
m = Modelv71(**params)
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
train = data.iloc[:m.n_train]
del data
gc.collect()
if is_sample:
print('*' * 100)
print('Take a random sample of the training data ...')
train = train.sample(frac=SAMPLE_SIZE)
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
cv_history = m.cross_validate(train, feature_list, PARAMS.copy())
cv_score = str(cv_history.iloc[-1]['auc-mean']) + '_' + str(cv_history.iloc[-1]['auc-stdv'])
PARAMS['num_boost_round'] = len(cv_history)
print('*' * 100)
print('Best AUC: {}'.format(cv_score))
joblib.dump(PARAMS, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_params.pkl'))
joblib.dump(cv_score, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_cv.pkl'))
elif args.t:
print('Full Training')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
SEED = int(args.seed)
params = {
'input_path': input_path,
'output_path': output_path
}
m = Modelv71(**params)
        # Load the merged dataset from disk, or build it and save it to disk
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
# separate out training and test set.
train = data.iloc[:m.n_train]
test = data.iloc[m.n_train:]
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
# Load params and holdout score from disk.
PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_params.pkl'))
HOLDOUT_SCORE = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_cv.pkl'))
PARAMS['num_boost_round'] = int(1.2 * PARAMS['num_boost_round'])
PARAMS['learning_rate'] /= 1.2
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
print('*' * 100)
print('PARAMS are: {}'.format(PARAMS))
# train model
model, feat_df = m.train(train, test, feature_list, is_eval=False, **PARAMS)
# evaluation part
preds, score = m.evaluate(test, feature_list, is_eval=False, model=model)
sub_identifier = "%s-%s-%s-%s" % (datetime.now().strftime('%Y%m%d-%H%M'), MODEL_FILENAME, HOLDOUT_SCORE, SEED)
sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv.zip'))
sub['TARGET'] = preds
sub.to_csv(os.path.join(basepath, 'submissions/%s.csv'%(sub_identifier)), index=False)
elif args.ensemble:
output_files = []
ensemble_preds = 0
for f in output_files:
sub = pd.read_csv(f)['TARGET'].values
ensemble_preds += sub
ensemble_preds /= len(output_files)
HOLDOUT_SCORE = .79479
sub_identifier = "%s-%s-%s" % (datetime.now().strftime('%Y%m%d-%H%M'), MODEL_FILENAME, HOLDOUT_SCORE)
sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv.zip'))
sub['TARGET'] = ensemble_preds
sub.to_csv(os.path.join(basepath, 'submissions/ensemble_%s.csv'%(sub_identifier)), index=False)
# ===== StarcoderdataPython snippet 8023055 | repo: GeorgeBatch/ultrasound-nerve-segmentation =====
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
########################################################################################################################
# ======================================================================================================================
# check_pars
# ======================================================================================================================
########################################################################################################################
# read-only file!!!
# standard-module imports
from keras.optimizers import Adam
def check_dict_subset(subset, superset):
"""Checks if one nested dictionary is a subset of another
:param subset: subset dictionary
:param superset: superset dictionary
    :return: if the check fails, prints helpful diagnostics and raises an AssertionError;
        if it succeeds, returns the string 'Your parameter choice is valid'
"""
print("superset keys:", superset.keys())
print("subset keys:", subset.keys())
assert all(item in superset.keys() for item in subset.keys())
print("Subset keys is a subset of superset keys", all(item in superset.keys() for item in subset.keys()))
for key in subset.keys():
print("superset key items:", superset[key])
print("subset key items:", subset[key])
if type(superset[key]) == dict:
assert type(subset[key]) == type(superset[key])
check_dict_subset(subset[key], superset[key])
elif type(superset[key]) == list:
assert subset[key] in superset[key]
print("subset[key] item:", subset[key], " is in superset[key] items:", superset[key])
else:
print("Something went wrong. Uncomment the print statements in check_dict_subset() for easier debugging.")
return type(superset[key]), superset[key]
return 'Your parameter choice is valid'
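# Illustrative example (not part of the original file): a parameter dict that is NOT
# a valid subset, e.g.
#     check_dict_subset({'activation': 'tanh'}, ALLOWED_PARS)
# fails the `assert subset[key] in superset[key]` branch because 'tanh' is not in
# ['elu', 'relu'], whereas BEST_PARS below passes every nested check.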
# Only change ALLOWED_PARS if adding new functionality
ALLOWED_PARS = {
'outputs': [1, 2],
'activation': ['elu', 'relu'],
'pooling_block': {
'trainable': [True, False]},
'information_block': {
'inception': {
'v1': ['a', 'b'],
'v2': ['a', 'b', 'c'],
'et': ['a', 'b']},
'convolution': {
'simple': ['not_normalized', 'normalized'],
'dilated': ['not_normalized', 'normalized']}},
'connection_block': ['not_residual', 'residual']
}
# for reference: in combination, these parameter choices showed the best performance
BEST_OPTIMIZER = Adam(lr=0.0045)
BEST_PARS = {
'outputs': 2,
'activation': 'elu',
'pooling_block': {'trainable': True},
'information_block': {'inception': {'v2': 'b'}},
'connection_block': 'residual'
}
print(check_dict_subset(BEST_PARS, ALLOWED_PARS))
# ===== StarcoderdataPython snippet 1745593 =====
"""Utility Functions"""
import logging
from collections import namedtuple # pytype: disable=pyi-error
def get_logger(logname):
"""Create and return a logger object."""
logger = logging.getLogger(logname)
return logger
def log_method(method):
"""Generate method for logging"""
def wrapped(self, *args, **kwargs):
"""Method that gets called for logging"""
self.logger.info('Entering %s' % method.__name__)
return method(self, *args, **kwargs)
return wrapped
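# Hypothetical usage sketch of log_method (illustration only; the class name and
# method are assumptions, not from this file):
#
#     class StateMachine:
#         def __init__(self):
#             self.logger = get_logger('state_machine')
#
#         @log_method
#         def handle_message(self, message):
#             ...
#
# Calling handle_message() first logs "Entering handle_message" via self.logger and
# then delegates to the wrapped method, which is why the decorator assumes the
# instance exposes a `logger` attribute.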
class MessageParseError(Exception):
"""Error for when parsing cannot be successfully completed."""
pass
class EapQueueMessage(namedtuple('EapQueueMessage',
'message src_mac port_mac')):
pass
class RadiusQueueMessage(namedtuple('RadiusQueueMessage',
'message src_mac identity state port_mac')):
pass
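# Example construction of the queue messages above (values are illustrative only):
#
#     msg = EapQueueMessage(message=eap_packet, src_mac='00:00:00:11:11:11',
#                           port_mac='00:00:00:22:22:22')
#     msg.src_mac    # fields are accessible by name, namedtuple-style
#     RadiusQueueMessage(message=radius_packet, src_mac='00:00:00:11:11:11',
#                        identity='user@example.com', state=b'...',
#                        port_mac='00:00:00:22:22:22')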
# ===== StarcoderdataPython snippet 1924251 =====
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Bilean API.
"""
from __future__ import print_function
import argparse
import copy
import getpass
import hashlib
import json
import logging
import os
import sys
import traceback
from oslo_utils import encodeutils
from oslo_utils import importutils
import six.moves.urllib.parse as urlparse
import bileanclient
from bileanclient._i18n import _
from bileanclient.common import utils
from bileanclient import exc
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import discover
from keystoneclient import exceptions as ks_exc
from keystoneclient import session
osprofiler_profiler = importutils.try_import("osprofiler.profiler")
SUPPORTED_VERSIONS = [1,]
class BileanShell(object):
def _append_global_identity_args(self, parser):
# register common identity args
session.Session.register_cli_options(parser)
v3_auth.Password.register_argparse_arguments(parser)
parser.add_argument('--key-file',
dest='os_key',
help='DEPRECATED! Use --os-key.')
parser.add_argument('--ca-file',
dest='os_cacert',
help='DEPRECATED! Use --os-cacert.')
parser.add_argument('--cert-file',
dest='os_cert',
help='DEPRECATED! Use --os-cert.')
parser.add_argument('--os-tenant-id',
default=utils.env('OS_TENANT_ID'),
help='Defaults to env[OS_TENANT_ID].')
parser.add_argument('--os_tenant_id',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-name',
default=utils.env('OS_TENANT_NAME'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os_tenant_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-region-name',
default=utils.env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME].')
parser.add_argument('--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-auth-token',
default=utils.env('OS_AUTH_TOKEN'),
help='Defaults to env[OS_AUTH_TOKEN].')
parser.add_argument('--os_auth_token',
help=argparse.SUPPRESS)
parser.add_argument('--os-service-type',
default=utils.env('OS_SERVICE_TYPE'),
help='Defaults to env[OS_SERVICE_TYPE].')
parser.add_argument('--os_service_type',
help=argparse.SUPPRESS)
parser.add_argument('--os-endpoint-type',
default=utils.env('OS_ENDPOINT_TYPE'),
help='Defaults to env[OS_ENDPOINT_TYPE].')
parser.add_argument('--os_endpoint_type',
help=argparse.SUPPRESS)
def get_base_parser(self):
parser = argparse.ArgumentParser(
prog='bilean',
description=__doc__.strip(),
epilog='See "bilean help COMMAND" '
'for help on a specific command.',
add_help=False,
formatter_class=HelpFormatter,
)
# Global arguments
parser.add_argument('-h', '--help',
action='store_true',
help=argparse.SUPPRESS,
)
parser.add_argument('--version',
action='version',
version=bileanclient.__version__)
parser.add_argument('-d', '--debug',
default=bool(utils.env('BILEANCLIENT_DEBUG')),
action='store_true',
help='Defaults to env[BILEANCLIENT_DEBUG].')
parser.add_argument('-v', '--verbose',
default=False, action="store_true",
help="Print more verbose output.")
parser.add_argument('-f', '--force',
dest='force',
default=False, action='store_true',
help='Prevent select actions from requesting '
'user confirmation.')
parser.add_argument('--os-bilean-url',
default=utils.env('OS_BILEAN_URL'),
help=('Defaults to env[OS_BILEAN_URL]. '
'If the provided bilean url contains '
'a version number and '
'`--os-bilean-api-version` is omitted '
'the version of the URL will be picked as '
'the bilean api version to use.'))
parser.add_argument('--os_bilean_url',
help=argparse.SUPPRESS)
parser.add_argument('--os-bilean-api-version',
default=utils.env('OS_BILEAN_API_VERSION',
default=None),
                            help='Defaults to env[OS_BILEAN_API_VERSION] or 1.')
parser.add_argument('--os_bilean_api_version',
help=argparse.SUPPRESS)
if osprofiler_profiler:
parser.add_argument('--profile',
metavar='HMAC_KEY',
help='HMAC key to use for encrypting context '
'data for performance profiling of operation. '
'This key should be the value of HMAC key '
'configured in osprofiler middleware in '
'bilean, it is specified in paste '
'configuration(/etc/bilean/api-paste.ini). '
'Without key the profiling will not be '
'triggered even if osprofiler is enabled on '
'server side.')
self._append_global_identity_args(parser)
return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
submodule = utils.import_versioned_module(version, 'shell')
self._find_actions(subparsers, submodule)
self._find_actions(subparsers, self)
self._add_bash_completion_subparser(subparsers)
return parser
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# Replace underscores with hyphens in the commands
# displayed to the user
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(command,
help=help,
description=desc,
add_help=False,
formatter_class=HelpFormatter
)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS,
)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
def _add_bash_completion_subparser(self, subparsers):
subparser = subparsers.add_parser('bash_completion',
add_help=False,
formatter_class=HelpFormatter)
self.subcommands['bash_completion'] = subparser
subparser.set_defaults(func=self.do_bash_completion)
def _get_bilean_url(self, args):
"""Translate the available url-related options into a single string.
Return the endpoint that should be used to talk to Bilean if a
clear decision can be made. Otherwise, return None.
"""
if args.os_bilean_url:
return args.os_bilean_url
else:
return None
def _discover_auth_versions(self, session, auth_url):
        # discover the API versions the server supports based on the
        # given URL
v2_auth_url = None
v3_auth_url = None
try:
ks_discover = discover.Discover(session=session, auth_url=auth_url)
v2_auth_url = ks_discover.url_for('2.0')
v3_auth_url = ks_discover.url_for('3.0')
except ks_exc.ClientException as e:
            # Identity service may not support the version discovery API.
            # Let's try to figure out the API version from the original URL.
url_parts = urlparse.urlparse(auth_url)
(scheme, netloc, path, params, query, fragment) = url_parts
path = path.lower()
if path.startswith('/v3'):
v3_auth_url = auth_url
elif path.startswith('/v2'):
v2_auth_url = auth_url
else:
# not enough information to determine the auth version
msg = ('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url. Identity service may not support API '
'version discovery. Please provide a versioned '
'auth_url instead. error=%s') % (e)
raise exc.CommandError(msg)
return (v2_auth_url, v3_auth_url)
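    # Illustrative fallback behaviour (not from the original source): when version
    # discovery fails, an auth_url such as 'http://keystone:5000/v3' yields
    # (None, 'http://keystone:5000/v3'), 'http://keystone:5000/v2.0' yields
    # ('http://keystone:5000/v2.0', None), and an unversioned URL raises
    # CommandError asking for a versioned auth_url.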
def _get_keystone_session(self, **kwargs):
ks_session = session.Session.construct(kwargs)
# discover the supported keystone versions using the given auth url
auth_url = kwargs.pop('auth_url', None)
(v2_auth_url, v3_auth_url) = self._discover_auth_versions(
session=ks_session,
auth_url=auth_url)
# Determine which authentication plugin to use. First inspect the
# auth_url to see the supported version. If both v3 and v2 are
# supported, then use the highest version if possible.
user_id = kwargs.pop('user_id', None)
username = kwargs.pop('username', None)
password = kwargs.pop('password', None)
user_domain_name = kwargs.pop('user_domain_name', None)
user_domain_id = kwargs.pop('user_domain_id', None)
# project and tenant can be used interchangeably
project_id = (kwargs.pop('project_id', None) or
kwargs.pop('tenant_id', None))
project_name = (kwargs.pop('project_name', None) or
kwargs.pop('tenant_name', None))
project_domain_id = kwargs.pop('project_domain_id', None)
project_domain_name = kwargs.pop('project_domain_name', None)
auth = None
use_domain = (user_domain_id or
user_domain_name or
project_domain_id or
project_domain_name)
use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
use_v2 = v2_auth_url and not use_domain
if use_v3:
auth = v3_auth.Password(
v3_auth_url,
user_id=user_id,
username=username,
password=password,
user_domain_id=user_domain_id,
user_domain_name=user_domain_name,
project_id=project_id,
project_name=project_name,
project_domain_id=project_domain_id,
project_domain_name=project_domain_name)
elif use_v2:
auth = v2_auth.Password(
v2_auth_url,
username,
password,
tenant_id=project_id,
tenant_name=project_name)
else:
# if we get here it means domain information is provided
# (caller meant to use Keystone V3) but the auth url is
# actually Keystone V2. Obviously we can't authenticate a V3
# user using V2.
exc.CommandError("Credential and auth_url mismatch. The given "
"auth_url is using Keystone V2 endpoint, which "
"may not able to handle Keystone V3 credentials. "
"Please provide a correct Keystone V3 auth_url.")
ks_session.auth = auth
return ks_session
def _get_kwargs_for_create_session(self, args):
if not args.os_username:
raise exc.CommandError(
_("You must provide a username via"
" either --os-username or "
"env[OS_USERNAME]"))
if not args.os_password:
# No password, If we've got a tty, try prompting for it
if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
# Check for Ctl-D
try:
args.os_password = getpass.getpass('OS Password: ')
except EOFError:
pass
# No password because we didn't have a tty or the
# user Ctl-D when prompted.
if not args.os_password:
raise exc.CommandError(
_("You must provide a password via "
"either --os-password, "
"env[OS_PASSWORD], "
"or prompted response"))
# Validate password flow auth
project_info = (
args.os_tenant_name or args.os_tenant_id or (
args.os_project_name and (
args.os_project_domain_name or
args.os_project_domain_id
)
) or args.os_project_id
)
if not project_info:
# tenant is deprecated in Keystone v3. Use the latest
# terminology instead.
raise exc.CommandError(
_("You must provide a project_id or project_name ("
"with project_domain_name or project_domain_id) "
"via "
" --os-project-id (env[OS_PROJECT_ID])"
" --os-project-name (env[OS_PROJECT_NAME]),"
" --os-project-domain-id "
"(env[OS_PROJECT_DOMAIN_ID])"
" --os-project-domain-name "
"(env[OS_PROJECT_DOMAIN_NAME])"))
if not args.os_auth_url:
raise exc.CommandError(
_("You must provide an auth url via"
" either --os-auth-url or "
"via env[OS_AUTH_URL]"))
kwargs = {
'auth_url': args.os_auth_url,
'username': args.os_username,
'user_id': args.os_user_id,
'user_domain_id': args.os_user_domain_id,
'user_domain_name': args.os_user_domain_name,
            'password': args.os_password,
'tenant_name': args.os_tenant_name,
'tenant_id': args.os_tenant_id,
'project_name': args.os_project_name,
'project_id': args.os_project_id,
'project_domain_name': args.os_project_domain_name,
'project_domain_id': args.os_project_domain_id,
'insecure': args.insecure,
'cacert': args.os_cacert,
'cert': args.os_cert,
'key': args.os_key
}
return kwargs
def _get_versioned_client(self, api_version, args):
endpoint = self._get_bilean_url(args)
auth_token = args.os_auth_token
auth_req = (hasattr(args, 'func') and
utils.is_authentication_required(args.func))
if not auth_req or (endpoint and auth_token):
kwargs = {
'token': auth_token,
'insecure': args.insecure,
'timeout': args.timeout,
'cacert': args.os_cacert,
'cert': args.os_cert,
'key': args.os_key,
}
else:
kwargs = self._get_kwargs_for_create_session(args)
kwargs = {'session': self._get_keystone_session(**kwargs)}
return bileanclient.Client(api_version, endpoint, **kwargs)
def main(self, argv):
def _get_subparser(api_version):
try:
return self.get_subcommand_parser(api_version)
except ImportError as e:
if not str(e):
# Add a generic import error message if the raised
# ImportError has none.
raise ImportError('Unable to import module. Re-run '
'with --debug for more info.')
raise
# Parse args once to find version
# NOTE(flepied) Under Python3, parsed arguments are removed
# from the list so make a copy for the first parsing
base_argv = copy.deepcopy(argv)
parser = self.get_base_parser()
(options, args) = parser.parse_known_args(base_argv)
try:
# NOTE(flaper87): Try to get the version from the
# bilean-url first. If no version was specified, fallback
# to the api-bilean-version arg. If both of these fail then
# fallback to the minimum supported one and let keystone
# do the magic.
endpoint = self._get_bilean_url(options)
endpoint, url_version = utils.strip_version(endpoint)
except ValueError:
# NOTE(flaper87): ValueError is raised if no endpoint is provided
url_version = None
# build available subcommands based on version
try:
api_version = int(options.os_bilean_api_version or url_version or 1)
if api_version not in SUPPORTED_VERSIONS:
raise ValueError
except ValueError:
msg = ("Invalid API version parameter. "
"Supported values are %s" % SUPPORTED_VERSIONS)
utils.exit(msg=msg)
# Handle top-level --help/-h before attempting to parse
# a command off the command line
if options.help or not argv:
parser = _get_subparser(api_version)
self.do_help(options, parser=parser)
return 0
# Short-circuit and deal with help command right away.
sub_parser = _get_subparser(api_version)
args = sub_parser.parse_args(argv)
if args.func == self.do_help:
self.do_help(args, parser=sub_parser)
return 0
elif args.func == self.do_bash_completion:
self.do_bash_completion(args)
return 0
if not args.os_password and options.os_password:
args.os_password = options.os_password
if args.debug:
# Set up the root logger to debug so that the submodules can
# print debug messages
logging.basicConfig(level=logging.DEBUG)
# for iso8601 < 0.1.11
logging.getLogger('iso8601').setLevel(logging.WARNING)
LOG = logging.getLogger('bileanclient')
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.DEBUG if args.debug else logging.INFO)
profile = osprofiler_profiler and options.profile
if profile:
osprofiler_profiler.init(options.profile)
client = self._get_versioned_client(api_version, args)
try:
args.func(client, args)
except exc.Unauthorized:
raise exc.CommandError("Invalid OpenStack Identity credentials.")
finally:
if profile:
trace_id = osprofiler_profiler.get().get_base_id()
print("Profiling trace ID: %s" % trace_id)
print("To display trace use next command:\n"
"osprofiler trace show --html %s " % trace_id)
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>.')
def do_help(self, args, parser):
"""Display help about this program or one of its subcommands."""
command = getattr(args, 'command', '')
if command:
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
parser.print_help()
if not args.os_bilean_api_version or args.os_bilean_api_version == '2':
# NOTE(NiallBunting) This currently assumes that the only versions
# are one and two.
try:
if command is None:
print("\nRun `bilean --os-bilean-api-version 1 help`"
" for v1 help")
else:
self.get_subcommand_parser(1)
if command in self.subcommands:
command = ' ' + command
print(("\nRun `bilean --os-bilean-api-version 1 help%s`"
" for v1 help") % (command or ''))
except ImportError:
pass
def do_bash_completion(self, _args):
"""Prints arguments for bash_completion.
Prints all of the commands and options to stdout so that the
bilean.bash_completion script doesn't have to hard code them.
"""
commands = set()
options = set()
for sc_str, sc in self.subcommands.items():
commands.add(sc_str)
for option in sc._optionals._option_string_actions.keys():
options.add(option)
commands.remove('bash_completion')
commands.remove('bash-completion')
print(' '.join(commands | options))
class HelpFormatter(argparse.HelpFormatter):
def start_section(self, heading):
# Title-case the headings
heading = '%s%s' % (heading[0].upper(), heading[1:])
super(HelpFormatter, self).start_section(heading)
def main():
try:
argv = [encodeutils.safe_decode(a) for a in sys.argv[1:]]
BileanShell().main(argv)
except KeyboardInterrupt:
utils.exit('... terminating bilean client', exit_code=130)
except Exception as e:
if utils.debug_enabled(argv) is True:
traceback.print_exc()
utils.exit(encodeutils.exception_to_unicode(e))
# ===== StarcoderdataPython snippet 251501 | repo: lefevre-fraser/openmeta-mms =====
from .mgardfconverter import MgaRdfConverter
# ===== StarcoderdataPython snippet 11346643 =====
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from Reporte_funciones import *
df = pd.read_csv(
'https://gist.githubusercontent.com/chriddyp/'
'c78bf172206ce24f77d6363a2d754b59/raw/'
'c353e8ef842413cae56ae3920b8fd78468aa4cb2/'
'usa-agricultural-exports-2011.csv')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H4(children='US Agriculture Exports (2011)'),
generate_table(df)
])
if __name__ == '__main__':
app.run_server(debug=True)
# ===== StarcoderdataPython snippet 6703921 | repo stars: 1-10 =====
import numpy as np
import platform
import os
import sys
from common.kalman.ekf import FastEKF1D, SimpleSensor
# radar tracks
SPEED, ACCEL = 0, 1 # Kalman filter states enum
rate, ratev = 20., 20. # model and radar are both at 20Hz
ts = 1./rate
freq_v_lat = 0.2 # Hz
k_v_lat = 2*np.pi*freq_v_lat*ts / (1 + 2*np.pi*freq_v_lat*ts)
freq_a_lead = .5 # Hz
k_a_lead = 2*np.pi*freq_a_lead*ts / (1 + 2*np.pi*freq_a_lead*ts)
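# These are discrete first-order low-pass filter gains: for a cutoff frequency f and
# sample time ts, k = 2*pi*f*ts / (1 + 2*pi*f*ts), equivalently dt / (RC + dt) with
# RC = 1 / (2*pi*f). Worked values at ts = 1/20 s:
#     freq_v_lat  = 0.2 Hz -> k_v_lat  ~= 0.059
#     freq_a_lead = 0.5 Hz -> k_a_lead ~= 0.136
# so each update blends roughly 6% / 14% of the new measurement into the previous estimate.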
# stationary qualification parameters
v_stationary_thr = 4. # objects moving below this speed are classified as stationary
v_oncoming_thr = -3.9 # needs to be a bit lower in abs value than v_stationary_thr to not leave "holes"
v_ego_stationary = 4. # no stationary object flag below this speed
class Track(object):
def __init__(self):
self.ekf = None
self.stationary = True
self.initted = False
def update(self, d_rel, y_rel, v_rel, d_path, v_ego_t_aligned):
if self.initted:
self.dPathPrev = self.dPath
self.vLeadPrev = self.vLead
self.vRelPrev = self.vRel
# relative values, copy
self.dRel = d_rel # LONG_DIST
self.yRel = y_rel # -LAT_DIST
self.vRel = v_rel # REL_SPEED
# compute distance to path
self.dPath = d_path
# computed velocity and accelerations
self.vLead = self.vRel + v_ego_t_aligned
if not self.initted:
self.aRel = 0. # nidec gives no information about this
self.vLat = 0.
self.aLead = 0.
else:
# estimate acceleration
a_rel_unfilt = (self.vRel - self.vRelPrev) / ts
a_rel_unfilt = np.clip(a_rel_unfilt, -10., 10.)
self.aRel = k_a_lead * a_rel_unfilt + (1 - k_a_lead) * self.aRel
v_lat_unfilt = (self.dPath - self.dPathPrev) / ts
self.vLat = k_v_lat * v_lat_unfilt + (1 - k_v_lat) * self.vLat
a_lead_unfilt = (self.vLead - self.vLeadPrev) / ts
a_lead_unfilt = np.clip(a_lead_unfilt, -10., 10.)
self.aLead = k_a_lead * a_lead_unfilt + (1 - k_a_lead) * self.aLead
if self.stationary:
# stationary objects can become non stationary, but not the other way around
self.stationary = v_ego_t_aligned > v_ego_stationary and abs(self.vLead) < v_stationary_thr
self.oncoming = self.vLead < v_oncoming_thr
if self.ekf is None:
self.ekf = FastEKF1D(ts, 1e3, [0.1, 1])
self.ekf.state[SPEED] = self.vLead
self.ekf.state[ACCEL] = 0
self.lead_sensor = SimpleSensor(SPEED, 1, 2)
self.vLeadK = self.vLead
self.aLeadK = self.aLead
else:
self.ekf.update_scalar(self.lead_sensor.read(self.vLead))
self.ekf.predict(ts)
self.vLeadK = float(self.ekf.state[SPEED])
self.aLeadK = float(self.ekf.state[ACCEL])
if not self.initted:
self.cnt = 1
self.vision_cnt = 0
else:
self.cnt += 1
self.initted = True
self.vision = False
def mix_vision(self, dist_to_vision, rel_speed_diff):
# rel speed is very hard to estimate from vision
if dist_to_vision < 4.0 and rel_speed_diff < 10.:
# vision point is never stationary
self.stationary = False
self.vision = True
self.vision_cnt += 1
def get_key_for_cluster(self):
# Weigh y higher since radar is inaccurate in this dimension
return [self.dRel, self.dPath*2, self.vRel]
# ******************* Cluster *******************
if platform.machine() == 'aarch64':
for x in sys.path:
pp = os.path.join(x, "phonelibs/hierarchy/lib")
if os.path.isfile(os.path.join(pp, "_hierarchy.so")):
sys.path.append(pp)
break
import _hierarchy
else:
from scipy.cluster import _hierarchy
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
    # Super-simplified clustering helper adapted from scipy's fcluster.
Z = np.asarray(Z, order='c')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
_hierarchy.cluster_dist(Z, T, float(t), int(n))
return T
RDR_TO_LDR = 2.7
def mean(l):
return sum(l)/len(l)
class Cluster(object):
def __init__(self):
self.tracks = set()
def add(self, t):
# add the first track
self.tracks.add(t)
# TODO: make generic
@property
def dRel(self):
return mean([t.dRel for t in self.tracks])
@property
def yRel(self):
return mean([t.yRel for t in self.tracks])
@property
def vRel(self):
return mean([t.vRel for t in self.tracks])
@property
def aRel(self):
return mean([t.aRel for t in self.tracks])
@property
def vLead(self):
return mean([t.vLead for t in self.tracks])
@property
def aLead(self):
return mean([t.aLead for t in self.tracks])
@property
def dPath(self):
return mean([t.dPath for t in self.tracks])
@property
def vLat(self):
return mean([t.vLat for t in self.tracks])
@property
def vLeadK(self):
return mean([t.vLeadK for t in self.tracks])
@property
def aLeadK(self):
return mean([t.aLeadK for t in self.tracks])
@property
def vision(self):
return any([t.vision for t in self.tracks])
@property
def vision_cnt(self):
return max([t.vision_cnt for t in self.tracks])
@property
def stationary(self):
return all([t.stationary for t in self.tracks])
@property
def oncoming(self):
return all([t.oncoming for t in self.tracks])
def toLive20(self, lead):
lead.dRel = float(self.dRel) - RDR_TO_LDR
lead.yRel = float(self.yRel)
lead.vRel = float(self.vRel)
lead.aRel = float(self.aRel)
lead.vLead = float(self.vLead)
lead.aLead = float(self.aLead)
lead.dPath = float(self.dPath)
lead.vLat = float(self.vLat)
lead.vLeadK = float(self.vLeadK)
lead.aLeadK = float(self.aLeadK)
lead.status = True
lead.fcw = False
def __str__(self):
ret = "x: %7.2f y: %7.2f v: %7.2f a: %7.2f" % (self.dRel, self.yRel, self.vRel, self.aRel)
if self.stationary:
ret += " stationary"
if self.vision:
ret += " vision"
if self.oncoming:
ret += " oncoming"
if self.vision_cnt > 0:
ret += " vision_cnt: %6.0f" % self.vision_cnt
return ret
def is_potential_lead(self, v_ego, enabled):
# predict cut-ins by extrapolating lateral speed by a lookahead time
# lookahead time depends on cut-in distance. more attentive for close cut-ins
# also, above 50 meters the predicted path isn't very reliable
# the distance at which v_lat matters is higher at higher speed
lookahead_dist = 40. + v_ego/1.2 #40m at 0mph, ~70m at 80mph
t_lookahead_v = [1., 0.]
t_lookahead_bp = [10., lookahead_dist]
# average dist
d_path = self.dPath
if enabled:
t_lookahead = np.interp(self.dRel, t_lookahead_bp, t_lookahead_v)
# correct d_path for lookahead time, considering only cut-ins and no more than 1m impact
lat_corr = np.clip(t_lookahead * self.vLat, -1, 0)
else:
lat_corr = 0.
d_path = np.maximum(d_path + lat_corr, 0)
if d_path < 1.5 and not self.stationary and not self.oncoming:
return True
else:
return False
def is_potential_lead2(self, lead_clusters):
if len(lead_clusters) > 0:
lead_cluster = lead_clusters[0]
# check if the new lead is too close and roughly at the same speed of the first lead: it might just be the second axle of the same vehicle
if (self.dRel - lead_cluster.dRel) < 8. and abs(self.vRel - lead_cluster.vRel) < 1.:
return False
else:
return True
else:
return False
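
if __name__ == "__main__":
    # Minimal usage sketch (illustrative values only): feed two nearby radar points
    # through Track.update, group them in a Cluster and inspect the averaged lead.
    # Running this still requires common.kalman.ekf to be importable, just like the
    # rest of this module.
    t1, t2 = Track(), Track()
    t1.update(d_rel=30.0, y_rel=0.4, v_rel=-2.0, d_path=0.4, v_ego_t_aligned=25.0)
    t2.update(d_rel=30.5, y_rel=0.6, v_rel=-2.2, d_path=0.6, v_ego_t_aligned=25.0)
    c = Cluster()
    c.add(t1)
    c.add(t2)
    print(str(c))
    print("potential lead: %s" % c.is_potential_lead(v_ego=25.0, enabled=True))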
|
StarcoderdataPython
|
3206233
|
__author__ = 'vid'
import os
import math
import natsort
def q2(fi):
    # Squared scattering vector for scattering angle fi (in degrees), i.e.
    # q^2 = (4*pi*n*sin(theta/2)/lambda)^2 with n = 1.33 and lambda = 532 nm,
    # as hard-coded below.
    return ((4*math.pi*1.33*math.sin(fi*math.pi/360))/(532*10**(-9)))**2


# Group the .ASC files in the current directory by the first character of the
# second underscore-separated field of their file names, storing [q^2, field]
# pairs per group. (Slovenian identifiers: pot = path, seznam = list,
# slovar = dictionary, kot = angle, vekt = scattering vector.)
pot = os.getcwd()
seznam = os.listdir(pot)
slovar = {}
seznam = natsort.natsorted(seznam)
print(seznam)
for a in seznam:
if a[-4:] == '.ASC':
b = a.split('_')
kot = float(b[0])
key = b[1][0]
vekt = q2(kot)
if key in slovar:
slovar[key].append([vekt, b[1]])
else:
slovar[key] = [[vekt, b[1]]]
|
StarcoderdataPython
|
3319534
|
from requests import Response
from otscrape.core.base.extractor import Extractor
class RequestText(Extractor):
def __init__(self, target=None, *, bytes_result=False, encoding=None, project=True, replace_error=None):
super().__init__(target=target, project=project, replace_error=replace_error)
self.bytes_result = bytes_result
self.encoding = encoding
def extract(self, page, cache):
x = page[self.target]
assert isinstance(x, Response)
if self.bytes_result:
return x.content
elif self.encoding:
return x.content.decode(self.encoding)
return x.text
class RequestStatusCode(Extractor):
def extract(self, page, cache):
target = self.target
assert isinstance(page[target], Response)
return page[target].status_code
class RequestJSON(Extractor):
def extract(self, page, cache):
target = self.target
assert isinstance(page[target], Response)
return page[target].json()
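
def _example_usage():
    # Illustrative sketch, not part of the library: build a bare requests.Response
    # by hand and run the three extractors against a dict standing in for a page.
    # It assumes the Extractor base class accepts target= in its constructor, as
    # RequestText's super().__init__ call above suggests.
    fake = Response()
    fake.status_code = 200
    fake._content = b'{"ok": true}'
    page = {'resp': fake}
    text = RequestText(target='resp').extract(page, cache={})
    code = RequestStatusCode(target='resp').extract(page, cache={})
    data = RequestJSON(target='resp').extract(page, cache={})
    return text, code, data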
|
StarcoderdataPython
|
1758596
|
<filename>src/util/community_info/api_info_center.py
import json
import os
import networkx as nx
from ..config import COMMUNITY_FREQUENCY_STORE_PATH, JAVADOC_GLOBAL_NAME, LATEST_COMMUNITY_MAP_PATH, MENIA_WHOLE_PREDICTION_STORE_PATH
from ..utils import normalize
class APIinfoCenter:
def __init__(self, doc_name: str = JAVADOC_GLOBAL_NAME) -> None:
self.community_frequency_ratio = 0.3
self.community_degree_ratio = 0.7
with open(COMMUNITY_FREQUENCY_STORE_PATH[doc_name], 'r', encoding='utf-8') as rf:
self.community_frequency = dict(json.load(rf))
self.community_map = nx.read_gexf(LATEST_COMMUNITY_MAP_PATH[doc_name])
with open(MENIA_WHOLE_PREDICTION_STORE_PATH[doc_name], 'r', encoding='utf-8') as rf:
self.thread2api = dict(json.load(rf))
self.api2thread = {}
for thread_id, mention2api in self.thread2api.items():
apis = list(mention2api.values())
for api in apis:
if api not in self.api2thread.keys():
self.api2thread[api] = []
self.api2thread[api].append(str(thread_id))
def get_communtiy_frequency(self, api: str) -> int:
if api not in self.community_frequency.keys():
return -1
return self.community_frequency[api]
def get_degree_in_community(self, api: str) -> int:
if api not in self.community_map.nodes:
return -1
return len(self.community_map.adj[api])
def get_api_community_score(self, api: str):
return self.community_frequency_ratio*normalize(self.get_communtiy_frequency(api)) + self.community_degree_ratio*normalize(self.get_degree_in_community(api))
def get_related_thread_ids(self, api: str):
if api not in self.api2thread.keys():
return []
return self.api2thread[api]
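
def _example_usage():
    # Illustrative sketch: score one fully-qualified API name. The name below is
    # made up, and the calls only succeed if the data files referenced by the
    # paths in ..config exist locally.
    center = APIinfoCenter()
    example_api = 'java.util.ArrayList.add'  # hypothetical API name
    return (center.get_api_community_score(example_api),
            center.get_related_thread_ids(example_api))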
|
StarcoderdataPython
|
4920156
|
<filename>fleet-rec/fleetrec/run.py
import argparse
import os
import yaml
from paddle.fluid.incubate.fleet.parameter_server import version
from fleetrec.core.factory import TrainerFactory
from fleetrec.core.utils import envs
from fleetrec.core.utils import util
engines = {"TRAINSPILER": {}, "PSLIB": {}}
clusters = ["SINGLE", "LOCAL_CLUSTER", "CLUSTER"]
def set_runtime_envs(cluster_envs, engine_yaml):
def get_engine_extras():
with open(engine_yaml, 'r') as rb:
_envs = yaml.load(rb.read(), Loader=yaml.FullLoader)
flattens = envs.flatten_environs(_envs)
engine_extras = {}
for k, v in flattens.items():
if k.startswith("train.trainer."):
engine_extras[k] = v
return engine_extras
if cluster_envs is None:
cluster_envs = {}
envs.set_runtime_environs(cluster_envs)
envs.set_runtime_environs(get_engine_extras())
need_print = {}
for k, v in os.environ.items():
if k.startswith("train.trainer."):
need_print[k] = v
print(envs.pretty_print_envs(need_print, ("Runtime Envs", "Value")))
def get_engine(engine):
engine = engine.upper()
if version.is_transpiler():
run_engine = engines["TRAINSPILER"].get(engine, None)
else:
run_engine = engines["PSLIB"].get(engine, None)
if run_engine is None:
raise ValueError("engine only support SINGLE/LOCAL_CLUSTER/CLUSTER")
return run_engine
def single_engine(args):
print("use single engine to run model: {}".format(args.model))
single_envs = {}
single_envs["train.trainer.trainer"] = "SingleTrainer"
single_envs["train.trainer.threads"] = "2"
single_envs["train.trainer.engine"] = "single"
set_runtime_envs(single_envs, args.model)
trainer = TrainerFactory.create(args.model)
return trainer
def cluster_engine(args):
print("launch cluster engine with cluster to run model: {}".format(args.model))
cluster_envs = {}
cluster_envs["train.trainer.trainer"] = "ClusterTrainer"
cluster_envs["train.trainer.engine"] = "cluster"
set_runtime_envs(cluster_envs, args.model)
trainer = TrainerFactory.create(args.model)
return trainer
def cluster_mpi_engine(args):
print("launch cluster engine with cluster to run model: {}".format(args.model))
cluster_envs = {}
cluster_envs["train.trainer.trainer"] = "CtrCodingTrainer"
set_runtime_envs(cluster_envs, args.model)
trainer = TrainerFactory.create(args.model)
return trainer
def local_cluster_engine(args):
print("launch cluster engine with cluster to run model: {}".format(args.model))
from fleetrec.core.engine.local_cluster_engine import LocalClusterEngine
cluster_envs = {}
cluster_envs["server_num"] = 1
cluster_envs["worker_num"] = 1
cluster_envs["start_port"] = 36001
cluster_envs["log_dir"] = "logs"
cluster_envs["train.trainer.trainer"] = "ClusterTrainer"
cluster_envs["train.trainer.strategy"] = "async"
cluster_envs["train.trainer.threads"] = "2"
cluster_envs["train.trainer.engine"] = "local_cluster"
cluster_envs["CPU_NUM"] = "2"
set_runtime_envs(cluster_envs, args.model)
launch = LocalClusterEngine(cluster_envs, args.model)
return launch
def local_mpi_engine(args):
print("launch cluster engine with cluster to run model: {}".format(args.model))
from fleetrec.core.engine.local_mpi_engine import LocalMPIEngine
print("use 1X1 MPI ClusterTraining at localhost to run model: {}".format(args.model))
mpi = util.run_which("mpirun")
if not mpi:
raise RuntimeError("can not find mpirun, please check environment")
cluster_envs = {}
cluster_envs["mpirun"] = mpi
cluster_envs["train.trainer.trainer"] = "CtrCodingTrainer"
cluster_envs["log_dir"] = "logs"
cluster_envs["train.trainer.engine"] = "local_cluster"
set_runtime_envs(cluster_envs, args.model)
launch = LocalMPIEngine(cluster_envs, args.model)
return launch
def engine_registry():
engines["TRAINSPILER"]["SINGLE"] = single_engine
engines["TRAINSPILER"]["LOCAL_CLUSTER"] = local_cluster_engine
engines["TRAINSPILER"]["CLUSTER"] = cluster_engine
engines["PSLIB"]["SINGLE"] = local_mpi_engine
engines["PSLIB"]["LOCAL_CLUSTER"] = local_mpi_engine
engines["PSLIB"]["CLUSTER"] = cluster_mpi_engine
engine_registry()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='fleet-rec run')
parser.add_argument("-m", "--model", type=str)
parser.add_argument("-e", "--engine", type=str)
args = parser.parse_args()
if not os.path.exists(args.model) or not os.path.isfile(args.model):
raise ValueError("argument model: {} error, must specify an existed YAML file".format(args.model))
if args.engine.upper() not in clusters:
raise ValueError("argument engine: {} error, must in {}".format(args.engine, clusters))
which_engine = get_engine(args.engine)
engine = which_engine(args)
engine.run()
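# Example invocation (illustrative; the YAML path is hypothetical):
#   python -m fleetrec.run -m models/ctr_dnn.yaml -e single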
|
StarcoderdataPython
|
4880358
|
<filename>spikeforest/spikeforest_analysis/computerecordinginfo.py
import mlprocessors as mlpr
import json
import spikeextractors as si
from .sfmdaextractors import SFMdaRecordingExtractor, SFMdaSortingExtractor
# _CONTAINER = 'sha1://5627c39b9bd729fc011cbfce6e8a7c37f8bcbc6b/spikeforest_basic.simg'
# _CONTAINER = 'sha1://0944f052e22de0f186bb6c5cb2814a71f118f2d1/spikeforest_basic.simg' #MAY26JJJ
_CONTAINER = 'sha1://4904b8f914eb159618b6579fb9ba07b269bb2c61/06-26-2019/spikeforest_basic.simg'
# A MountainLab processor for generating the summary info for a recording
class ComputeRecordingInfo(mlpr.Processor):
NAME = 'ComputeRecordingInfo'
VERSION = '0.1.1'
CONTAINER = _CONTAINER
recording_dir = mlpr.Input(directory=True, description='Recording directory')
channels = mlpr.IntegerListParameter(description='List of channels to use.', optional=True, default=[])
json_out = mlpr.Output('Info in .json file')
def run(self):
ret = {}
recording = SFMdaRecordingExtractor(dataset_directory=self.recording_dir, download=True)
if len(self.channels) > 0:
recording = si.SubRecordingExtractor(parent_recording=recording, channel_ids=self.channels)
ret['samplerate'] = recording.get_sampling_frequency()
ret['num_channels'] = len(recording.get_channel_ids())
ret['duration_sec'] = recording.get_num_frames() / ret['samplerate']
write_json_file(self.json_out, ret)
def write_json_file(fname, obj):
with open(fname, 'w') as f:
json.dump(obj, f)
|
StarcoderdataPython
|
258676
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import onnx
from onnx import helper
from onnx.helper import make_opsetid
from onnx import TensorProto
input_info = helper.make_tensor_value_info('input', TensorProto.BFLOAT16, [1, 5])
output_info = helper.make_tensor_value_info('output', TensorProto.BFLOAT16, [1, 5])
# Create a node (NodeProto) - an Identity node that passes BFLOAT16 data through
# (adapted from a Pad-11 example)
node_def = helper.make_node(
'Identity', # node name
['input'], # inputs
['output'] # outputs
)
graph_def = helper.make_graph(nodes=[node_def], name='test_types_BFLOAT16',
                              inputs=[input_info], outputs=[output_info])
model_def = helper.make_model(graph_def, producer_name='AIInfra',
opset_imports=[make_opsetid('', 13)])
onnx.checker.check_model(model_def)
onnx.helper.strip_doc_string(model_def)
final_model = onnx.shape_inference.infer_shapes(model_def)
onnx.checker.check_model(final_model)
onnx.save(final_model, 'test_types_BFLOAT16.onnx')
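
# Optional sanity check (illustrative): reload the file just written and make sure
# it still passes the ONNX checker.
reloaded = onnx.load('test_types_BFLOAT16.onnx')
onnx.checker.check_model(reloaded)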
|
StarcoderdataPython
|
4809184
|
from rich._tools import iter_first, iter_last, iter_first_last, ratio_divide
def test_iter_first():
assert list(iter_first([])) == []
iterable = iter_first(["apples", "oranges", "pears", "lemons"])
assert next(iterable) == (True, "apples")
assert next(iterable) == (False, "oranges")
assert next(iterable) == (False, "pears")
assert next(iterable) == (False, "lemons")
def test_iter_last():
assert list(iter_last([])) == []
iterable = iter_last(["apples", "oranges", "pears", "lemons"])
assert next(iterable) == (False, "apples")
assert next(iterable) == (False, "oranges")
assert next(iterable) == (False, "pears")
assert next(iterable) == (True, "lemons")
def test_iter_first_last():
assert list(iter_first_last([])) == []
iterable = iter_first_last(["apples", "oranges", "pears", "lemons"])
assert next(iterable) == (True, False, "apples")
assert next(iterable) == (False, False, "oranges")
assert next(iterable) == (False, False, "pears")
assert next(iterable) == (False, True, "lemons")
def test_ratio_divide():
assert ratio_divide(10, [1]) == [10]
assert ratio_divide(10, [1, 1]) == [5, 5]
assert ratio_divide(12, [1, 3]) == [3, 9]
assert ratio_divide(0, [1, 3]) == [0, 0]
assert ratio_divide(0, [1, 3], [1, 1]) == [1, 1]
assert ratio_divide(10, [1, 0]) == [10, 0]
|
StarcoderdataPython
|
12840344
|
"""check_read_rom.py
get ROMID of 1-wire device.
assume only one 1-wire device on the bus.
"""
import tpow.usb9097
import tpow.device
import cfg
bus = tpow.usb9097.USB9097(cfg.com_port) # USB9097('COM3')
id_little = tpow.device.read_rom(bus)
id_big = [a for a in reversed(id_little)]
print(" ".join(['%02X' % ord(a) for a in id_big]))
|
StarcoderdataPython
|
1895706
|
<filename>pythia/datasets/multi_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDataset class is used by DatasetLoader class to load multiple datasets and provide more granular control over them.
"""
import sys
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from pythia.common.batch_collator import BatchCollator
from pythia.common.registry import registry
from pythia.datasets.samplers import DistributedSampler
from pythia.utils.distributed_utils import synchronize, is_main_process, broadcast_scalar
from pythia.utils.general import get_batch_size
class MultiDataset:
"""
MultiDataset class that is used for training on multiple datasets together.
"""
def __init__(self, dataset_type="train"):
self._dataset_type = dataset_type
self.writer = registry.get("writer")
self._is_main_process = is_main_process()
self._global_config = registry.get("config")
def _process_datasets(self):
if "datasets" not in self.opts:
            self.writer.write(
                "No datasets attribute present. Setting default to vqa2.",
                "warning",
            )
datasets = "vqa2"
else:
datasets = self.opts["datasets"]
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
self._given_datasets = datasets
def load(self, **opts):
self.opts = opts
self._process_datasets()
self._datasets = []
self._builders = []
self._loaders = []
self._samplers = []
self._iterators = []
self._total_length = 0
self._per_dataset_lengths = []
self._num_datasets = 0
self._finished_iterators = {}
self._used_once = {}
for dataset in self._given_datasets:
builder_class = registry.get_builder_class(dataset)
if builder_class is None:
print("No builder class found for %s." % dataset)
continue
builder_instance = builder_class()
if dataset in self.opts["dataset_attributes"]:
attributes = self.opts["dataset_attributes"][dataset]
else:
self.writer.write(
"Dataset %s is missing from "
"dataset_attributes in config." % dataset,
"error",
)
sys.exit(1)
builder_instance.build(self._dataset_type, attributes)
dataset_instance = builder_instance.load(self._dataset_type, attributes)
if dataset_instance is None:
continue
loader_instance, sampler_instance = self.build_dataloader(
dataset_instance, self.opts
)
self._builders.append(builder_instance)
self._datasets.append(dataset_instance)
self._loaders.append(loader_instance)
self._samplers.append(sampler_instance)
self._per_dataset_lengths.append(len(dataset_instance))
self._total_length += len(dataset_instance)
self._num_datasets = len(self._datasets)
self._dataset_probablities = [
1 / self._num_datasets for _ in range(self._num_datasets)
]
training_parameters = self._global_config.training_parameters
self._proportional_sampling = training_parameters.dataset_size_proportional_sampling
if self._dataset_type != "train":
# If it is val or test, it needs to be all datasets need to be fully iterated
# as metrics will be calculated in eval mode over complete datasets
self._proportional_sampling = True
if self._proportional_sampling is True:
self._dataset_probablities = self._per_dataset_lengths[:]
self._dataset_probablities = [
prob / self._total_length for prob in self._dataset_probablities
]
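            # e.g. per-dataset lengths [100, 300] with total 400 give sampling
            # probabilities [0.25, 0.75], so larger datasets are drawn more often.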
self._loader_index = 0
self._chosen_dataset = self._datasets[self._loader_index]
self._chosen_loader = self._loaders[self._loader_index]
@property
def dataset_type(self):
return self._dataset_type
@property
def num_datasets(self):
return self._num_datasets
def get_datasets(self):
return self._datasets
@property
def first_loader(self):
return self._loaders[0]
def __len__(self):
# Since, this is iterator, we need to return total length == number of batches
return self._total_length // get_batch_size()
def __iter__(self):
if self._num_datasets == 1:
return iter(self._loaders[0])
self._iterators = []
self._finished_iterators = {}
self._used_once = {}
for loader in self._loaders:
self._iterators.append(iter(loader))
self._chosen_iterator = self._iterators[self._loader_index]
return self
def __next__(self):
try:
next_batch = next(self._chosen_iterator)
except StopIteration:
if (
self._proportional_sampling is True or
len(self._used_once) != self._num_datasets
):
self._finished_iterators[self._loader_index] = 1
if len(self._finished_iterators) == self._num_datasets:
raise
else:
self.change_dataloader()
next_batch = next(self._chosen_iterator)
else:
raise
self._used_once[self._loader_index] = 1
return next_batch
def change_dataloader(self):
if self._num_datasets <= 1:
return
choice = 0
if self._is_main_process:
choice = np.random.choice(
self._num_datasets, 1, p=self._dataset_probablities
)[0]
while choice in self._finished_iterators:
choice = np.random.choice(
self._num_datasets, 1, p=self._dataset_probablities
)[0]
choice = broadcast_scalar(choice, 0, device=registry.get("current_device"))
self._loader_index = choice
self._chosen_dataset = self._datasets[self._loader_index]
self._chosen_loader = self._loaders[self._loader_index]
self._chosen_iterator = self._iterators[self._loader_index]
def verbose_dump(self, *args, **kwargs):
self._chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
batch = self._chosen_dataset.prepare_batch(batch)
self.change_dataloader()
return batch
def update_registry_for_model(self, config):
"""
Use this if there is some specific configuration required by model
which must be inferred at runtime.
"""
for builder in self._builders:
builder.update_registry_for_model(config)
def init_args(self, parser):
parser.add_argument_group("General MultiDataset Arguments")
parser.add_argument(
"-dsp",
"--dataset_size_proportional_sampling",
type=bool,
default=0,
help="Pass if you want to sample from"
" dataset according to its size. Default: Equal "
" weighted sampling",
)
# TODO: Figure out later if we want to init args from datasets
# self._init_args(parser)
def _init_args(self, parser):
"""Override this function to add extra parameters to
parser in your child task class.
Parameters
----------
parser : ArgumentParser
Original parser object passed from the higher level classes like
trainer
Returns
-------
type
Description of returned object.
"""
for builder in self._builders:
builder.init_args(parser)
def clean_config(self, config):
"""
Override this in case you want to clean the config you updated earlier
in update_registry_for_model
"""
return config
def build_dataloader(self, dataset, opts):
training_parameters = self._global_config.training_parameters
num_workers = training_parameters.num_workers
pin_memory = training_parameters.pin_memory
other_args = {}
self._add_extra_args_for_dataloader(dataset, opts, other_args)
loader = DataLoader(
dataset=dataset,
pin_memory=pin_memory,
collate_fn=BatchCollator(),
num_workers=num_workers,
**other_args
)
loader.dataset_type = self._dataset_type
return loader, other_args.get("sampler", None)
def _add_extra_args_for_dataloader(self, dataset, opts, other_args={}):
training_parameters = self._global_config.training_parameters
dataset_type = self._dataset_type
other_args["shuffle"] = False
if dataset_type != "test":
other_args["shuffle"] = True
if (
training_parameters.local_rank is not None
and training_parameters.distributed
):
other_args["sampler"] = DistributedSampler(dataset, shuffle=other_args["shuffle"])
# Shuffle is mutually exclusive with sampler, let DistributedSampler take care of
# shuffle and pop from main args
other_args.pop("shuffle")
other_args["batch_size"] = get_batch_size()
return other_args
def seed_sampler(self, epoch):
training_parameters = self._global_config.training_parameters
if (
training_parameters.local_rank is not None
and training_parameters.distributed
):
for sampler in self._samplers:
assert hasattr(sampler, "set_epoch"), "Can't seed without `set_epoch` method"
sampler.set_epoch(epoch)
|
StarcoderdataPython
|
3482012
|
<reponame>noxtoby/MedICSS2019-TADPOLE
# import sys
# sys.path.append('..')
from os.path import join
from tadpole.io import load_tadpole_data, write_submission_table
from tadpole.validation import get_test_subjects
from tadpole.submission import create_submission_table
from tadpole.models.simple import create_prediction
# Script requires that TADPOLE_LB1_LB2.csv is in the ../data/ directory.
# Change if necessary.
dataLocationLB1LB2 = '../data/'  # data directory
tadpoleLB1LB2_file = join(dataLocationLB1LB2, 'TADPOLE_LB1_LB2.csv')
output_file = '../data/TADPOLE_Submission_SummerSchool2018_TeamName1.csv'
print('Loading data ...')
LB_table, LB_targets = load_tadpole_data(tadpoleLB1LB2_file)
print('Generating forecasts ...')
# * Create arrays to contain the 84 monthly forecasts for each LB2 subject
n_forecasts = 7 * 12 # forecast 7 years (84 months).
lb2_subjects = get_test_subjects(LB_table)
submission = []
# Each subject in LB2
for rid in lb2_subjects:
subj_data = LB_table.query('RID == @rid')
subj_targets = LB_targets.query('RID == @rid')
# *** Construct example forecasts
subj_forecast = create_submission_table([rid], n_forecasts)
subj_forecast = create_prediction(subj_data, subj_targets, subj_forecast)
submission.append(subj_forecast)
## Now construct the forecast spreadsheet and output it.
print('Constructing the output spreadsheet {0} ...'.format(output_file))
write_submission_table(submission, output_file)
|
StarcoderdataPython
|
6645604
|
###############################################################################
# Simple models of the effect of blurring and churning on the properties of
# the Milky Way
###############################################################################
from functools import wraps
import numpy
from scipy import integrate
from galpy.orbit import Orbit
from galpy.df import dehnendf
from skewnormal import skewnormal
_R0= 8. #kpc
_V0= 220. #kms
_LINEARENRICHMENT= False
_TAUEQ= 2.
_ZINIT= 0.12
# defaults
_SKEWM_DEFAULT= 0.4
_SKEWS_DEFAULT= 0.1
_SKEWA_DEFAULT= -4.
_DFEHDR_DEFAULT= -0.1
def scalarDecorator(func):
"""Decorator to return scalar outputs"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[0]).shape == ():
scalarOut= True
newargs= ()
for ii in range(len(args)):
if ii == 0:
newargs= newargs+(numpy.array([args[ii]]),)
else:
newargs= newargs+(args[ii],)
args= newargs
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
#
# PURE DYNAMICS
#
# Blurring p(Rg|R)
@scalarDecorator
def blurring_pRgR(Rg,R,sr=31.4,hr=3.,hs=267.):
"""
NAME:
blurring_pRgR
PURPOSE:
The distribution of guiding-center radii at a given R from blurring
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= df(Orbit([R/8.,0.,Rg[ii]/R]))
return out
# Churning p(final Rg | initial Rg, tau)
@scalarDecorator
def churning_pRgfRgi(Rgf,Rgi,tau,fmig=1.):
"""
NAME:
churning_pRgfRgi
PURPOSE:
The distribution of final guiding-center radii from churning
INPUT:
Rgf - Guiding center radius (/kpc), can be array
Rgi - Initial guiding-center radius (/kpc)
tau - time (/Gyr)
fmig= (1.) efficiency of migration relative to fiducial model
OUTPUT:
p(Rgf|Rgi)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
sig= (0.01+0.2*fmig*tau*Rgi*numpy.exp(-(Rgi-8.)**2./16.))
return 1./numpy.sqrt(2.*numpy.pi)\
*numpy.exp(-(Rgi-Rgf)**2./2./sig)
# Churning p(Rg|R,tau)
@scalarDecorator
def churning_pRgRtau(Rg,R,tau,fmig=1.,sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pRgRtau
PURPOSE:
The distribution of guiding-center radii at a given radius and time from churning
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
tau - time (/Gyr)
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R,tau)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= integrate.fixed_quad(lambda x: df(Orbit([R/8.,0.,x/R]))\
*churning_pRgfRgi(x,Rg[ii],tau,
fmig=fmig),
numpy.amax([Rg[ii]-4.,0.]),
Rg[ii]+6.,n=40)[0]
return out
# Churning p(Rg|R)
@scalarDecorator
def churning_pRgR(Rg,R,fmig=1.,sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pRgR
PURPOSE:
The distribution of guiding-center radii at a given radius from churning (assume constant SFH)
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= integrate.quadrature(\
lambda tau: integrate.fixed_quad(lambda x: \
df(Orbit([R/8.,0.,x/R]))
*churning_pRgfRgi(x,Rg[ii],
tau,fmig=fmig),
numpy.amax([Rg[ii]-4.,0.]),
Rg[ii]+6.,n=40)[0],
0.,10.,tol=10.**-4.,rtol=10**-3.,vec_func=False)[0]
return out
#
# MDFs
#
# Initial MDF at different radii
def pFehRg(Feh,Rg,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
pFehRg
PURPOSE:
The initial MDF at a given radius Rg
INPUT:
Feh - Metallicity
Rg - Radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
p(Feh|Rg) at the initial time
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
return skewnormal(Feh,m=skewm+dFehdR*(Rg-4.),s=skews,a=skewa)\
*0.5*(1.+numpy.tanh((Feh-numpy.log10(_ZINIT))/0.2))
def pAgeRg(age,Rg,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
pAgeRg
PURPOSE:
The initial age DF at a given radius Rg
INPUT:
age - age (/Gyr)
Rg - Radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
p(age|Rg) at the initial time
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
ageFeh= fehAgeRg(age,Rg,skewm=skewm,dFehdR=dFehdR)
return pFehRg(ageFeh,Rg,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)\
/numpy.fabs(_dagedFehRg(ageFeh,Rg,skewm=skewm,dFehdR=dFehdR))
# The relation between age and metallicity at a given radius
def fehAgeRg(age,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
fehAgeRg
PURPOSE:
The metallicity corresponding to a given age at radius Rg; assuming linear increase in exp(Feh) with time from Zinit Zsolar
INPUT:
age - age (/Gyr)
Rg - guiding-center radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
FeH(age,Rg)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return numpy.log10(_ZINIT+(10.-age)/10.*(10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return numpy.log10((eq-_ZINIT)*(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT)
def ageFehRg(feh,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
ageFehRg
PURPOSE:
The age corresponding to a given metallicity at radius Rg; assuming linear increase in exp(Feh) with time from _ZINIT Zsolar
INPUT:
feh - metallicity
Rg - guiding-center radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
age(FeH,Rg)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return 10.-10.*(10.**feh-_ZINIT)/((10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return 10.+numpy.log(1.-(10.**feh-_ZINIT)/(eq-_ZINIT))*_TAUEQ
def RgAgeFeh(age,feh,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
RgAgeFeh
PURPOSE:
The guiding-center radius corresponding to a given metallicity and age; assuming linear increase in exp(Feh) with time from _ZINIT Zsolar
INPUT:
age - age (/Gyr)
feh - metallicity
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
Rg(age,FeH)
HISTORY:
2015-01-13 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return (numpy.log10(10.*(10.**feh-_ZINIT)/(10.-age))-skewm-skews)/dFehdR+4.
else:
return (numpy.log10((10.**feh-_ZINIT)/(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT)-skews-skewm)/dFehdR+4.
# Also need derivatives for integrals and distribution
def _dfehdAgeRg(age,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
if _LINEARENRICHMENT:
return -1./10./numpy.log(10.)*(10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT)\
/(_ZINIT+(10.-age)/10.*(numpy.exp(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return -(eq-_ZINIT)*numpy.exp(-(10.-age)/_TAUEQ)/(((eq-_ZINIT)*(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT))/numpy.log(10.)/_TAUEQ
def _dagedFehRg(feh,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
if _LINEARENRICHMENT:
return -10.*10.**feh*numpy.log(10.)\
/((10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return -_TAUEQ*numpy.log(10.)*10.**feh/(eq-_ZINIT)/(1.-(10.**feh-_ZINIT)/(eq-_ZINIT))
def _dfehdRgAge(Rg,age,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
feh= fehAgeRg(age,Rg,skewm=skewm,skews=skews,dFehdR=dFehdR)
if _LINEARENRICHMENT:
return (10.-age)/10.*10.**(skews+skewm+dFehdR*(Rg-4.))*dFehdR/10.**feh
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return (1.-numpy.exp(-(10.-age)/_TAUEQ))*eq*dFehdR/10.**feh
def test_dfehdAgeRg():
ages= numpy.tile(numpy.linspace(1.,10.,101),(101,1))
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
dx= 10.**-8.
dage= _dfehdAgeRg(ages,Rs)
dage_num= (fehAgeRg(ages+dx,Rs)-fehAgeRg(ages,Rs))/dx
assert numpy.all(numpy.fabs(dage-dage_num) < 10.**-4.), 'dfehdAgeRg implemented incorrectly'
return None
def test_dagedFgeRg():
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
fehs= numpy.tile(numpy.linspace(-1.5,0.7,101),(101,1))
Rs[fehs > fehAgeRg(0.,Rs)-0.03]= numpy.nan
dx= 10.**-8.
dfeh= _dagedFehRg(fehs,Rs)
dfeh_num= (ageFehRg(fehs+dx,Rs)-ageFehRg(fehs,Rs))/dx
assert numpy.all((numpy.fabs(dfeh-dfeh_num) < 10.**-4.)+numpy.isnan(dfeh)), 'dagedFehRg implemented incorrectly'
return None
def test_dfehdRgAge():
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
ages= numpy.tile(numpy.linspace(1.,9.9,101),(101,1))
dx= 10.**-8.
dfeh= _dfehdRgAge(Rs,ages)
dfeh_num= (fehAgeRg(ages,Rs+dx)-fehAgeRg(ages,Rs))/dx
assert numpy.all((numpy.fabs(dfeh-dfeh_num) < 10.**-6.)+numpy.isnan(dfeh)), 'dfehdRgAge implemented incorrectly'
return None
# Blurring MDF
@scalarDecorator
def blurring_pFehR(feh,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,
sr=31.4,hr=3.,hs=267.):
"""
NAME:
blurring_pFehR
PURPOSE:
The distribution of metallicities at a given R due to blurring
INPUT:
feh - metallicity
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
for ii in range(len(feh)):
out[ii]= integrate.quadrature(lambda x: pFehRg(feh[ii],x,
skewm=skewm,skews=skews,
skewa=skewa,
dFehdR=dFehdR)\
*blurring_pRgR(x,R,sr=sr,
hr=hr,hs=hs),
numpy.amax([0.,R-4.]),R+4.,
tol=10.**-4.,rtol=10.**-3.,
vec_func=False)[0]
return out
# Churning age distribution
@scalarDecorator
def churning_pAgeR(age,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pAgeR
PURPOSE:
The distribution of ages at a given R due to churning
INPUT:
age - age (/Gyr)
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(age|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(age)
for ii in range(len(age)):
out[ii]= integrate.quadrature(\
lambda x: pFehRg(fehAgeRg(age[ii],x,skewm=skewm,skews=skews,
dFehdR=dFehdR),x,
skewm=skewm,skews=skews,
skewa=skewa,
dFehdR=dFehdR)\
*churning_pRgR(x,R,fmig=fmig,sr=sr,
hr=hr,hs=hs)\
/numpy.fabs(_dagedFehRg(fehAgeRg(age[ii],x,skewm=skewm,skews=skews,dFehdR=dFehdR),x)),
numpy.amax([0.,R-4.]),R+6.,
tol=10.**-4.,rtol=10.**-3.,
vec_func=False)[0]
return out
# Churning metallicity distribution
@scalarDecorator
def churning_pFehR(feh,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,
skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.,
useInitialAgeDF=True):
"""
NAME:
churning_pFehR
PURPOSE:
The distribution of metallicities at a given R due to churning
INPUT:
feh - metallicity
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
for ii in range(len(feh)):
# shortcut for Age DF
if useInitialAgeDF:
ageDF= lambda a: pAgeRg(a,R,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)
else:
ageDF= lambda a: churning_pAgeR(a,R,skewm=skewm,skews=skews,
skewa=skewa,dFehdR=dFehdR,fmig=fmig,
sr=sr,hr=hr,hs=hs)
# Short age function, so we don't have to repeat this
ageFunc= lambda r: ageFehRg(feh[ii],r,skewm=skewm,skews=skews,
dFehdR=dFehdR)
# Integrate
def intFunc(x):
tage= ageFunc(x)
if tage <= 0. or tage > 10. or numpy.isnan(tage):
return 0.
return ageDF(ageFunc(x))\
*churning_pRgRtau(x,R,tage,
fmig=fmig,sr=sr,
hr=hr,hs=hs)\
/numpy.fabs(_dfehdAgeRg(tage,x))
out[ii]= integrate.quad(intFunc,
numpy.amax([0.,R-12.]),(feh[ii]-skewm-skews)/dFehdR+4.)[0]
return out
# Churning metallicity distribution
@scalarDecorator
def churning_pFehAgeR(feh,age,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,
skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.,
useInitialAgeDF=True):
"""
NAME:
churning_pFehAgeR
PURPOSE:
The distribution of metallicities and ages at a given R due to churning
INPUT:
feh - metallicity (can be array)
age - age (/Gyr)
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh,age|R)
HISTORY:
2015-01-13 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
# p(age|R)
if useInitialAgeDF:
ageP= pAgeRg(age,R,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)
else:
ageP= churning_pAgeR(age,R,skewm=skewm,skews=skews,
skewa=skewa,dFehdR=dFehdR,fmig=fmig,
sr=sr,hr=hr,hs=hs)
for ii in range(len(feh)):
trg= RgAgeFeh(age,feh[ii],
skewm=skewm,skews=skews,dFehdR=dFehdR)
if trg <= 0. or numpy.isnan(trg) or numpy.isinf(trg) \
or feh[ii] > (skews+skewm+dFehdR*(trg-4.)):
out[ii]= 0.
continue
out[ii]= \
churning_pRgRtau(trg,R,age,fmig=fmig,sr=sr,hr=hr,hs=hs)\
*ageP/_dfehdRgAge(trg,age,skewm=skewm,skews=skews,dFehdR=dFehdR)
return out
def skewness(x,mdf):
m= numpy.nansum(x*mdf)/numpy.nansum(mdf)
return numpy.nansum((x-m)**3.*mdf)/numpy.nansum(mdf)\
/(numpy.nansum((x-m)**2*mdf)/numpy.nansum(mdf))**1.5
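
if __name__ == '__main__':
    # Quick self-checks plus a small illustration (values are illustrative only):
    # run the derivative consistency tests defined above, then compute the skewness
    # of the initial MDF at R = 8 kpc using the scalar pFehRg evaluated point-wise.
    test_dfehdAgeRg()
    test_dagedFgeRg()
    test_dfehdRgAge()
    fehs = numpy.linspace(-1.5, 0.5, 201)
    mdf = numpy.array([pFehRg(f, 8.) for f in fehs])
    print(skewness(fehs, mdf))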
|
StarcoderdataPython
|
67994
|
<reponame>thesealion/django-social-auth
import json
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import smart_unicode
class SubfieldBase(type):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name, **kwargs):
if func:
func(self, cls, name, **kwargs)
else:
super(superclass, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
return contribute_to_class
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return json.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return json.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_prep_value(self, value):
"""Convert value to JSON string before save"""
try:
return json.dumps(value)
except Exception, e:
raise ValidationError(str(e))
def value_to_string(self, obj):
"""Return value from object converted to string properly"""
return smart_unicode(self.get_prep_value(self._get_val_from_obj(obj)))
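
# Usage sketch (illustrative):
#
#     class UserSocialAuth(models.Model):
#         extra_data = JSONField(blank=True)
#
# Values assigned to extra_data are serialized to JSON on save and come back as
# Python structures on access, courtesy of the SubfieldBase/Creator machinery above.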
|
StarcoderdataPython
|
84629
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import subprocess
import sys
import os
import argparse
from timeit import default_timer as timer
def beep():
    notes = [(0.25, 440), (0.25, 480), (0.25, 440), (0.25, 480),
             (0.25, 440), (0.25, 480), (0.25, 440), (0.5, 520)]
    try:
        import winsound
        for duration, freq in notes:
            # winsound.Beep expects integer frequency (Hz) and duration (ms)
            winsound.Beep(freq, int(duration * 1000))
    except Exception:
        for duration, freq in notes:
            os.system(
                'play -nq -t alsa synth {} sine {}'.format(duration, freq))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--executable",
help="PearRay executable", default="pearray")
parser.add_argument("-c", "--count", help="Number of calls",
metavar='N', type=int, default=5)
parser.add_argument("-p", "--project", help="Project file", required=True)
parser.add_argument(
"-o", "--output", help="Output directory", default="./benchmark")
parser.add_argument(
"-t", "--threads", help="Number of threads", metavar='N', type=int, default=0)
parser.add_argument("-q", "--quite", help="Be quite", action="store_true")
    parser.add_argument(
        "--superquite", help="Be super quiet", action="store_true")
parser.add_argument(
"-b", "--beep", help="Beep when done", action="store_true")
args = parser.parse_args()
if not args.quite:
cmd = [args.executable, "--version"]
ret_code = subprocess.call(cmd)
if ret_code != 0:
print("Error calling PearRay executable.")
sys.exit(-1)
if not os.path.exists(args.output):
os.makedirs(args.output)
PR_DEF_ARGS = ["-P",
"-t", str(args.threads),
"--no-adaptive-tiling",# Benchmarks have to be reproducable
]
if args.quite:
PR_DEF_ARGS.append("-q")
else:
PR_DEF_ARGS.append("-p") # Show progress
res_file = open(args.output + "/result.txt", "w")
full_time = 0
for i in range(args.count):
if not args.superquite:
print(">> Task %i/%i" % (i+1, args.count))
output_dir = args.output + "/%i" % (i+1)
cmd = [args.executable, *PR_DEF_ARGS,
"-i", args.project, "-o", output_dir]
start = timer()
ret_code = subprocess.call(cmd)
if ret_code != 0:
print("Error calling PearRay executable.")
sys.exit(-1)
end = timer()
diff = end-start
full_time += diff
if not args.superquite:
print("%f s" % diff)
res_file.write("Task %i = %f s\n" % (i+1, diff))
# Calculate average
avg = full_time/args.count
if not args.superquite:
print("Full: %f s | Avg: %f s" % (full_time, avg))
res_file.write("Full = %f s\n" % full_time)
res_file.write("Avg = %f s\n" % avg)
if args.beep:
beep()
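
# Example invocation (illustrative; the script name and project path are hypothetical):
#   python benchmark.py -e /usr/local/bin/pearray -p scenes/example_project -c 5 -o ./benchmark -b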
|
StarcoderdataPython
|
12854
|
# -*- coding: utf-8 *-*
import logging
from unittest import TestCase
from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat
log = logging.getLogger(__name__)
class Foo(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self[k] = v
def __setitem__(self, name, value):
# helper to add attributes per self[attr] = value -> self.attr == value
setattr(self, name, value)
def __repr__(self):
return pretty_repr(self, ignore_own_repr=True)
class TestAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_obj=None, expected_kwargs={},
working_obj=None, working_kwargs={},
failing_obj=None, failing_kwargs={},
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls, expected_cls in permuteflat(actual_classes, expected_classes):
expected_obj = expected_obj or expected_cls(**expected_kwargs)
working_obj = working_obj or actual_cls(**working_kwargs)
self.run_assert((working_obj, expected_obj, namepaths, expected_namepaths))
failing_obj = failing_obj or actual_cls(**failing_kwargs)
self.run_assert((failing_obj, expected_obj, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*assert_equal_struct* can compare similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1),
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths(self):
"""
With namepaths *assert_equal_struct* can compare similar structures and structures with
lists of values in full depth.
This ignores all additional paths at the expected object.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1, y=4),
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[1, 4],
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1, 4],
namepaths=['0'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths_and_expected_namepaths(self):
"""
Like just with namepaths, the values are sometimes at other paths at the expected object and
will be compared using expected_namepaths in same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(a=1, b=4),
namepaths=['x'],
expected_namepaths=['a'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != a: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[4, 1],
namepaths=['x'],
expected_namepaths=['1'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != 1: 3 != 1')
self.check(expected_obj=[4, 1],
namepaths=['0'],
expected_namepaths=['1'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0 != 1: 3 != 1')
class TestMultiAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
multi_assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_objs=None, expected_kwargs_list=[],
working_objs=None, working_kwargs_list=[],
failing_objs=None, failing_kwargs_list=[],
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls1, actual_cls2, expected_cls1, expected_cls2 in \
permuteflat(*([actual_classes] * 2 + [expected_classes] * 2)):
if not expected_objs:
expected_objs = (expected_cls1(**expected_kwargs_list[0]),
expected_cls2(**expected_kwargs_list[1]))
if not working_objs:
working_objs = (actual_cls1(**working_kwargs_list[0]),
actual_cls2(**working_kwargs_list[1]))
self.run_assert((working_objs, expected_objs, namepaths, expected_namepaths))
if not failing_objs:
failing_objs = (actual_cls1(**failing_kwargs_list[0]),
actual_cls2(**failing_kwargs_list[1]))
self.run_assert((failing_objs, expected_objs, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*multi_assert_equal_struct* can compare multiple similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2, y=3)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=2, y=5)],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\ty: 5 != 3')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 3]],
failing_objs=[[4, 0], [2, 5]],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t1: 5 != 3')
def test_with_namepaths(self):
"""
With namepaths *multi_assert_equal_struct* can compare multiple similar structures and
structures with lists of values in full depth.
This ignores all additional paths at the expected objects.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[1], [2, 0]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 4 != 2')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 0]],
failing_objs=[[4, 0], [5, 0]],
namepaths=['0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t0: 5 != 2')
def test_with_namepaths_and_expected_namepaths(self):
"""
Like just with namepaths, the values are sometimes at other paths at the expected object and
will be compared using expected_namepaths in same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(y=1), dict(y=2, x=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
expected_namepaths=['y'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != y: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx != y: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[0, 1], [0, 2]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
expected_namepaths=['1'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != 1: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx != 1: 4 != 2')
self.check(expected_objs=[[1, 2], [3, 4]],
working_objs=[[2, 1], [4, 3]],
failing_objs=[[2, 5], [6, 3]],
namepaths=['0', '1'],
expected_namepaths=['1', '0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t1 != 0: 5 != 1\n'\
'Index 1: actual values != expected values:\n\t0 != 1: 6 != 4')
|
StarcoderdataPython
|
9635125
|
<reponame>NicholasBake/GreenEditor<gh_stars>1-10
from gui.Gui import guiLoad
if __name__ == "__main__":
guiLoad()
|
StarcoderdataPython
|
6609515
|
import os
def get_test_conf_file():
my_dir = os.path.dirname(os.path.realpath(__file__))
panoptes_test_conf_file = os.path.join(my_dir, 'config_files/test_panoptes_config.ini')
return my_dir, panoptes_test_conf_file
|
StarcoderdataPython
|
3590655
|
def test(array):
    # Label index i as "Prime" when it has exactly two divisors, otherwise
    # "Composite" (0 and 1 have fewer than two divisors, so they are not prime).
    for i in range(len(array)):
        if isprime(i) == 2:
            array[i] = "Prime"
        else:
            array[i] = "Composite"


def isprime(input):
    # Despite the name, this returns the number of divisors of input in 1..input.
    counter = 0
    for i in range(1, input + 1):
        if input % i == 0:
            counter = counter + 1
    return counter
arr = [None] * 10
test(arr)
for i in range(1,len(arr)):
print(i, arr[i])
|
StarcoderdataPython
|
9659124
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""This package defines various utilities classes.
"""
|
StarcoderdataPython
|
3575436
|
<filename>server/admin_tools/service_tools/service_status.py
#!/usr/bin/python
#**************************************************************************
# This file is part of eBioKit 2017 Admin tools.
# Copyright <NAME>, SLU, Sweden 2017
#
# This tool updates the configuration for the eBioKit services.
#
# Version: 0.1
#
# Changelog:
# - 2017/04/13: First version
#
# Enjoy the eBioKit and please contact us for any question or suggestion!
# --
# The eBioKit development team
# More info http://ebiokit.eu/
# Technical contact <EMAIL>
#**************************************************************************
from common import *
import sys
def main(options):
read_conf()
if "-h" in options or "--help" in options:
show_help()
if len(options) < 1 or not(options[0] in ["-a", "--all", "-s", "--service"]):
show_help()
# STEP 0. FETCH ALL INSTALLED SERVICES
INSTALLED_SERVICES = get_installed_services()
# STEP 1. CHECK IF SELECTED SERVICE IS AVAILABLE
target_services = None
if options[0] in ["-s", "--service"]:
if len(options) < 2:
show_help("Lost parameter 'service_name'.")
for service in INSTALLED_SERVICES:
if service.instance_name == options[1]:
target_services = [service]
break
        if target_services is None:
show_help("Service \"" + options[1] + "\" is not installed.")
else:
target_services = INSTALLED_SERVICES
nice_output = not(len(options) > 2 and options[2] == "--no-cmd")
# STEP 2. CHECK ALL SELECTED SERVICES
for service in target_services:
check_service(service, nice_output)
exit(0)
def show_help(message=""):
# STEP 0. FETCH ALL INSTALLED SERVICES
INSTALLED_SERVICES = get_installed_services()
print message
print "Usage: service_status [-s service_name | -a]"
print " where"
print " -a, --all : Check the status for all eBioKit services"
print " -s, --service : Check the status for the selected eBioKit service "
services = []
for service in INSTALLED_SERVICES:
services.append(service.instance_name)
print " Available services: [" + ", ".join(services) + "]"
print ""
exit(1)
def check_service(service, nice_output = True):
if nice_output:
printServiceMessage(service.title, 20)
if not service.enabled:
print "SERVICE IS NOT ENABLED"
return
try:
# Call to status for docker-compose
output, error = ebiokit_remote_launcher("service status", service.instance_name)
print(output.rstrip("\n"))
if error != "":
print >> sys.stderr, error.rstrip("\n")
except Exception as ex:
print ex.message
print "UNKNOWN"
return
if __name__ == "__main__":
main(sys.argv[1:])
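# Example invocations (assuming a configured eBioKit install providing the
# `common` helpers imported above):
#   python service_status.py --all
#   python service_status.py --service <instance_name>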
|
StarcoderdataPython
|
9681186
|
# -*- coding: iso-8859-1 -*-
from __future__ import print_function, division
import sys
if( sys.version_info[0] == 2 ):
range = xrange
import math
import qm3.maths.matrix
try:
import qm3.actions._minimize
has_minimize_so = True
except:
has_minimize_so = False
def __grms( vec ):
# o = 0.0
# for i in vec:
# o += i * i
# o = math.sqrt( o )
o = math.sqrt( sum( [ i*i for i in vec ] ) )
return( o, o / math.sqrt( len( vec ) ) )
def default_log( txt ):
sys.stdout.write( txt + "\n" )
sys.stdout.flush()
def downhill( obj,
step_number = 1000,
step_size = 0.1,
print_frequency = 10,
step_tolerance = 1.0e-6,
log_function = default_log ):
log_function( "---------------------------------------- Minimization (DH)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Step Tolerance: %20.10lg\n"%( step_tolerance ) )
log_function( "%10s%20s%20s"%( "Step", "Function", "Displacement" ) )
log_function( "-" * 50 )
x = [ obj.coor[:] ]
for i in range( obj.size ):
x.append( obj.coor[:] )
x[i+1][i] += step_size
f = []
for i in range( obj.size + 1 ):
obj.coor = x[i][:]
obj.get_func()
f.append( obj.func )
n = float( obj.size )
k = 0
df = step_tolerance * 2.0
while( k < step_number and df > step_tolerance ):
iLo = f.index( min( f ) )
iHi = f.index( max( f ) )
d = []
for i in range( obj.size ):
d.append( sum( [ x[j][i] for j in range( obj.size + 1 ) ] ) )
for i in range( obj.size ):
d[i] = ( d[i] - (n + 1.0) * x[iHi][i] ) / n
df = math.sqrt( sum( [ d[i] * d[i] for i in range( obj.size ) ] ) / n )
if( df > step_tolerance ):
# Try Reflection
xNew = [ x[iHi][i] + 2.0 * d[i] for i in range( obj.size ) ]
obj.coor = xNew[:]
obj.get_func()
fNew = obj.func
if( fNew <= f[iLo] ): # (accept reflection)
x[iHi] = xNew[:]
f[iHi] = fNew
# Try Expanding the Reflection
xNew = [ x[iHi][i] + d[i] for i in range( obj.size ) ]
obj.coor = xNew[:]
obj.get_func()
fNew = obj.func
if( fNew <= f[iLo] ): # (accept expansion)
x[iHi] = xNew[:]
f[iHi] = fNew
else:
# Try Reflection
if( fNew <= f[iHi] ): # (accept reflection)
x[iHi] = xNew[:]
f[iHi] = fNew
else:
# Try Contraction
xNew = [ x[iHi][i] + 0.5 * d[i] for i in range( obj.size ) ]
obj.coor = xNew[:]
obj.get_func()
fNew = obj.func
if( fNew <= f[iHi] ): # (accept contraction)
x[iHi] = xNew[:]
f[iHi] = fNew
else:
# Use Shrinkage
for i in range( obj.size + 1 ):
if( i != iLo ):
x[i] = [ x[i][j] - 0.5 * x[iLo][j] for j in range( obj.size ) ]
obj.coor = x[i][:]
obj.get_func()
f[i] = obj.func
k += 1
if( k%print_frequency == 0 ):
log_function( "%10d%20.5lf%20.10lf"%( k, f[iLo], df ) )
obj.current_step( k )
if( k%print_frequency != 0 ):
log_function( "%10d%20.5lf%20.10lf"%( k, f[iLo], df ) )
log_function( "-" * 50 )
obj.coor = x[iLo][:]
obj.get_func()
def steepest_descent( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 15.,
log_function = default_log ):
log_function( "---------------------------------------- Minimization (SD)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg\n"%( gradient_tolerance ) )
obj.get_grad()
norm, grms = __grms( obj.grad )
if( norm > step_size ):
ssiz = step_size
elif( norm > gradient_tolerance ):
ssiz = norm
else:
ssiz = gradient_tolerance
log_function( "%10s%20s%20s%20s"%( "Step", "Function", "Gradient", "Displacement" ) )
log_function( "-" * 70 )
log_function( "%10s%20.5lf%20.8lf%20.10lf"%( "", obj.func, grms, ssiz ) )
i = 0
while( i < step_number and grms > gradient_tolerance ):
# -- perform step
for j in range( obj.size ):
obj.coor[j] -= obj.grad[j] / norm * ssiz
# -- check new point
obj.get_grad()
norm, grms = __grms( obj.grad )
if( norm > step_size ):
ssiz = step_size
elif( norm > gradient_tolerance ):
ssiz = norm
else:
ssiz = gradient_tolerance
i = i + 1
if( i%print_frequency == 0 ):
log_function( "%10d%20.5lf%20.10lf%20.10lf"%( i, obj.func, grms, ssiz ) )
obj.current_step( i )
if( i%print_frequency != 0 ):
log_function( "%10d%20.5lf%20.10lf%20.10lf"%( i + 1, obj.func, grms, ssiz ) )
log_function( "-" * 70 + "\n" )
def adam( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 15.,
log_function = default_log ):
log_function( "---------------------------------------- Minimization (ADAM)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg\n"%( gradient_tolerance ) )
obj.get_grad()
norm, grms = __grms( obj.grad )
beta = 0.9
gamm = 0.999
epsi = 1.e-8
log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
log_function( "-" * 50 )
log_function( "%10s%20.5lf%20.8lf"%( "", obj.func, grms ) )
v = [ 0.0 for i in range( obj.size ) ]
s = [ 0.0 for i in range( obj.size ) ]
i = 0
while( i < step_number and grms > gradient_tolerance ):
# -- perform step
pbet = 1.0 / ( 1.0 - math.pow( beta, i + 1 ) )
pgam = 1.0 / ( 1.0 - math.pow( gamm, i + 1 ) )
for j in range( obj.size ):
v[j] = beta * v[j] + ( 1.0 - beta ) * obj.grad[j]
s[j] = gamm * s[j] + ( 1.0 - gamm ) * obj.grad[j] * obj.grad[j]
obj.coor[j] -= step_size * v[j] * pbet / ( math.sqrt( s[j] * pgam ) + epsi )
# -- check new point
obj.get_grad()
norm, grms = __grms( obj.grad )
i = i + 1
if( i%print_frequency == 0 ):
log_function( "%10d%20.5lf%20.10lf"%( i, obj.func, grms ) )
obj.current_step( i )
if( i%print_frequency != 0 ):
log_function( "%10d%20.5lf%20.10lf"%( i + 1, obj.func, grms ) )
log_function( "-" * 50 )
def fire( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 1.5,
exit_uphill = False,
log_function = default_log,
mixing_alpha = 0.1,
delay_step = 5,
fire2 = False ):
log_function( "---------------------------------------- Minimization (FIRE)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg"%( gradient_tolerance ) )
log_function( "Checking UpHill: %20s"%( exit_uphill ) )
log_function( "Version 2.0: %20s\n"%( fire2 ) )
log_function( "%10s%20s%20s%20s"%( "Step", "Function", "Gradient", "Displacement" ) )
log_function( "-" * 70 )
nstp = 0
ssiz = step_size
alph = mixing_alpha
velo = [ 0.0 for i in range( obj.size ) ]
step = [ 0.0 for i in range( obj.size ) ]
obj.get_grad()
qfun = True
norm, grms = __grms( obj.grad )
log_function( "%10s%20.5lf%20.8lf%20.10lf"%( "", obj.func, grms, ssiz ) )
i = 0
while( i < step_number and grms > gradient_tolerance and qfun ):
if( - sum( [ velo[j] * obj.grad[j] for j in range( obj.size ) ] ) > 0.0 ):
if( not fire2 ):
vsiz = math.sqrt( sum( [ velo[j] * velo[j] for j in range( obj.size ) ] ) )
velo = [ ( 1.0 - alph ) * velo[j] - alph * obj.grad[j] / norm * vsiz for j in range( obj.size ) ]
if( nstp > delay_step ):
ssiz = min( ssiz * 1.1, step_size )
alph *= 0.99
nstp += 1
else:
alph = mixing_alpha
ssiz *= 0.5
nstp = 0
if( fire2 ):
step = [ ssiz * velo[j] for j in range( obj.size ) ]
tmp = math.sqrt( sum( [ step[j] * step[j] for j in range( obj.size ) ] ) )
if( tmp > ssiz ):
for j in range( obj.size ):
step[j] *= ssiz / tmp
for j in range( obj.size ):
obj.coor[j] -= 0.5 * step[j]
velo = [ 0.0 for j in range( obj.size ) ]
velo = [ velo[j] - ssiz * obj.grad[j] for j in range( obj.size ) ]
if( fire2 ):
if( - sum( [ velo[j] * obj.grad[j] for j in range( obj.size ) ] ) > 0.0 ):
vsiz = math.sqrt( sum( [ velo[j] * velo[j] for j in range( obj.size ) ] ) )
velo = [ ( 1.0 - alph ) * velo[j] - alph * obj.grad[j] / norm * vsiz for j in range( obj.size ) ]
step = [ ssiz * velo[j] for j in range( obj.size ) ]
tmp = math.sqrt( sum( [ step[j] * step[j] for j in range( obj.size ) ] ) )
if( tmp > ssiz ):
for j in range( obj.size ):
step[j] *= ssiz / tmp
for j in range( obj.size ):
obj.coor[j] += step[j]
lfun = obj.func
obj.get_grad()
norm, grms = __grms( obj.grad )
if( exit_uphill ):
if( lfun < obj.func ):
log_function( ">> search become uphill!" )
qfun = False
for j in range( obj.size ):
obj.coor[j] -= step[j]
i = i + 1
if( i%print_frequency == 0 ):
log_function( "%10d%20.5lf%20.10lf%20.10lf"%( i, obj.func, grms, ssiz ) )
obj.current_step( i )
if( i%print_frequency != 0 ):
log_function( "%10d%20.5lf%20.10lf%20.10lf"%( i + 1, obj.func, grms, ssiz ) )
log_function( "-" * 70 + "\n" )
#def conjugate_gradient( obj,
# step_number = 100,
# step_size = 0.1,
# print_frequency = 10,
# gradient_tolerance = 1.5,
# log_function = default_log ):
# log_function( "---------------------------------------- Minimization (CG)\n" )
# log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
# log_function( "Step Number: %20d"%( step_number ) )
# log_function( "Step Size: %20.10lg"%( step_size ) )
# log_function( "Print Frequency: %20d"%( print_frequency ) )
# log_function( "Gradient Tolerance: %20.10lg\n"%( gradient_tolerance ) )
# log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
# log_function( "-" * 50 )
# --------------------------------------
# log_function( "-" * 50 + "\n" )
def l_bfgs( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 1.5,
history = 9,
exit_uphill = False,
log_function = default_log ):
log_function( "---------------------------------------- Minimization (L-BFGS)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg"%( gradient_tolerance ) )
log_function( "Number of Updates: %20d"%( history ) )
log_function( "Checking UpHill: %20s\n"%( exit_uphill ) )
log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
log_function( "-" * 50 )
aux = [ 0.0 for i in range( history ) ]
rho = [ 0.0 for i in range( history ) ]
og = [ 0.0 for i in range( obj.size ) ]
ox = [ 0.0 for i in range( obj.size ) ]
step = [ 0.0 for i in range( obj.size ) ]
dg = []
dx = []
for j in range( history ):
dx.append( [ 0.0 for ii in range( obj.size ) ] )
dg.append( [ 0.0 for ii in range( obj.size ) ] )
obj.get_grad()
norm, grms = __grms( obj.grad )
qfun = True
log_function( "%10s%20.5lf%20.8lf"%( "", obj.func, grms ) )
i = 0
while( i < step_number and grms > gradient_tolerance and qfun ):
if( i > history ):
tmp = dx.pop( 0 ); dx.append( tmp[:] )
tmp = dg.pop( 0 ); dg.append( tmp[:] )
tmp = rho.pop( 0 ); rho.append( tmp )
if( i > 0 ):
j = min( i, history ) - 1
hgx = 0.0
hgg = 0.0
for k in range( obj.size ):
dx[j][k] = obj.coor[k] - ox[k]
dg[j][k] = obj.grad[k] - og[k]
hgx += dg[j][k] * dx[j][k]
hgg += dg[j][k] * dg[j][k]
rho[j] = 1.0 / hgx
hscal = hgx / hgg
ox = obj.coor[:]
og = obj.grad[:]
step = [ -ii for ii in obj.grad ]
if( i == 0 ):
step = [ ii/norm for ii in step ]
else:
for j in range( min( i, history ) - 1, -1, -1 ):
aux[j] = rho[j] * sum( [ ii*jj for ii,jj in zip( step, dx[j] ) ] )
for k in range( obj.size ):
step[k] -= aux[j] * dg[j][k]
for k in range( obj.size ):
step[k] *= hscal
for j in range( min( i, history ) ):
aux[j] -= rho[j] * sum( [ ii*jj for ii,jj in zip( step, dg[j] ) ] )
for k in range( obj.size ):
step[k] += aux[j] * dx[j][k]
tmp = math.sqrt( sum( [ ii*ii for ii in step ] ) )
if( tmp > step_size ):
for j in range( obj.size ):
step[j] *= step_size / tmp
for j in range( obj.size ):
obj.coor[j] += step[j]
lfun = obj.func
obj.get_grad()
norm, grms = __grms( obj.grad )
if( exit_uphill ):
if( lfun < obj.func ):
log_function( ">> search become uphill!" )
qfun = False
for j in range( obj.size ):
obj.coor[j] -= step[j]
i = i + 1
if( i%print_frequency == 0 ):
log_function( "%10d%20.5lf%20.10lf"%( i, obj.func, grms ) )
obj.current_step( i )
if( i%print_frequency != 0 ):
log_function( "%10d%20.5lf%20.10lf"%( i + 1, obj.func, grms ) )
log_function( "-" * 50 + "\n" )
def conjugate_gradient_plus( obj,
step_number = 100,
print_frequency = 10,
gradient_tolerance = 1.5,
method = "Polak-Ribiere",
restart = True,
log_function = default_log ):
log_function( "------------------------------------------ Minimization (CG+)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg"%( gradient_tolerance ) )
log_function( "Method: %22s\n"%( method ) )
log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
log_function( "-" * 50 )
irest = int( restart )
dmeth = { "Fletcher-Reeves" : 1, "Polak-Ribiere" : 2, "Positive Polak-Ribiere": 3 }
if( method in dmeth ):
imeth = dmeth[method]
else:
imeth = 3
if( has_minimize_so ):
qm3.actions._minimize.cgp( obj, step_number, gradient_tolerance, print_frequency, irest, imeth, log_function )
else:
raise Exception( "minimize.conjugate_gradient_plus: qm3.actions._minimize.so not available..." )
log_function( "-" * 50 + "\n" )
def baker( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 1.5,
follow_mode = -1, # -1 : minimum / 0... : mode following (TS)
allow_overlap = False,
log_function = default_log ):
if( follow_mode >= obj.size or follow_mode < -1 ):
follow_mode = -1
log_function( "---------------------------------------- Minimization (Baker)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Following Mode: %20d"%( follow_mode ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg"%( gradient_tolerance ) )
log_function( "Allow Overlap: %20s\n"%( allow_overlap ) )
if( follow_mode > -1 ):
log_function( "%10s%20s%20s%20s"%( "Step", "Function", "Gradient", "Nneg,Fmode,Eval" ) )
log_function( "-" * 70 )
else:
log_function( "%10s%20s%20s%5s"%( "Step", "Function", "Gradient", "Nneg" ) )
log_function( "-" * 55 )
mstp = 1.0e-1
lrge = 1.0e+6
step = 50.0
tol1 = 1.0e-4
tol2 = 1.0e-8
emax = 1.0e5
emin = 1.0e-3
mxit = 999
dx = [ 0.0 for i in range( obj.size ) ]
grms = gradient_tolerance * 2.0
k = 0
flg = True
while( k < step_number and grms > gradient_tolerance and flg ):
# update coordinates
for i in range( obj.size ):
obj.coor[i] += dx[i]
# get into for the new point
obj.get_hess()
ei, ev = qm3.maths.matrix.diag( obj.hess, obj.size )
# scale eigenvalues and take the number of negative modes...
for i in range( obj.size ):
if( math.fabs( ei[i] ) < emin ):
if( ei[i] < 0.0 ):
ei[i] = - emin
else:
ei[i] = emin
if( math.fabs( ei[i] ) > emax ):
if( ei[i] < 0.0 ):
ei[i] = - emax
else:
ei[i] = emax
nneg = sum( [ 1 for i in range( obj.size ) if ei[i] < 0.0 ] )
# transform gradient vector to the local hessian modes, and startup dx
dx = []
gx = []
grms = 0.0
for i in range( obj.size ):
grms += obj.grad[i] * obj.grad[i]
dx.append( 0.0 )
gx.append( 0.0 )
for j in range( obj.size ):
gx[i] += obj.grad[j] * ev[j*obj.size+i]
grms = math.sqrt( grms )
# check whether we are searching a specific mode or not
if( follow_mode > -1 ):
# check for changes in current mode via overlapping
lowr = follow_mode;
if( k > 0 and allow_overlap == True ):
ovr = 0.0;
for i in range( obj.size ):
ovr += ev[i*obj.size+follow_mode] * mvec[i]
# ovr = math.fabs( ovr )
# -- range( obj.size ) >> range( follow_mode )
# [OR]
# -- if( j != follow_mode and ei[j] < 0.0 ):
for j in range( obj.size ):
if( j != follow_mode ):
tmp = 0.0
for i in range( obj.size ):
tmp += ev[i*obj.size+j] * mvec[i]
# tmp = math.fabs( tmp )
if( tmp > ovr ):
ovr = tmp
follow_mode = j
if( lowr != follow_mode ):
log_function( "[Allow_Overlap] Selected following mode: %ld (%.6lf)"%( follow_mode, ovr ) )
eiv = ei[follow_mode];
mvec = []
for i in range( obj.size ):
mvec.append( ev[i*obj.size+follow_mode] )
# Calculate the step for the maximization
if( math.fabs( gx[follow_mode] ) > tol1 ):
tmp = 0.5 * ( ei[follow_mode] + math.sqrt( ei[follow_mode] * ei[follow_mode] + 4.0 * gx[follow_mode] * gx[follow_mode] ) )
lmbd = gx[follow_mode] / ( tmp - ei[follow_mode] )
else:
if( nneg == 1 ):
lmbd = - gx[follow_mode] / ei[follow_mode]
else:
lmbd = mstp
for i in range( obj.size ):
dx[i] = lmbd * ev[i*obj.size+follow_mode];
# minimize along the selected modes (skip first if followed or not)
if( follow_mode == 0 ):
lowr = 1
else:
lowr = 0
lmbd = 0.0
if( ei[lowr] < 0.0 ):
lmbd = ei[lowr] - step
l1 = ei[lowr]
l2 = - lrge
ovr = 0.0;
for j in range( obj.size ):
if( j != follow_mode ):
ovr += ( gx[j] * gx[j] ) / ( lmbd - ei[j] )
i = 0
while( i < mxit and math.fabs( lmbd - ovr ) >= tol2 ):
if( ei[lowr] > 0.0 ):
lmbd = ovr;
else:
if( ovr < lmbd ):
l1 = lmbd;
if( ovr > lmbd ):
l2 = lmbd;
if( l2 > - lrge ):
lmbd = 0.5 * ( l1 + l2 )
elif( l2 == - lrge ):
lmbd -= step;
ovr = 0.0;
for j in range( obj.size ):
if( j != follow_mode ):
ovr += ( gx[j] * gx[j] ) / ( lmbd - ei[j] )
i += 1
if( i > mxit ):
log_function( "\n -- Too much lambda iterations..." )
flg = False
# modify follow_mode eigenvalues and vectors...
if( follow_mode > -1 ):
ei[follow_mode] = lmbd - 1.0
for i in range( obj.size ):
ev[i*obj.size+follow_mode] = 0.0
# check final step (may be too small or too large...)
for i in range( obj.size ):
dx[i] += sum( [ ev[i*obj.size+j] * gx[j] / ( lmbd - ei[j] ) for j in range( obj.size ) ] )
ovr = math.sqrt( sum( [ dx[i] * dx[i] for i in range( obj.size ) ] ) )
# checking for a small step
if( ovr < tol2 ):
log_function( "\n -- The step size is *very* small..." )
flg = False
# scale long steps...
if( ovr > step_size ):
for i in range( obj.size ):
dx[i] *= step_size / ovr
# next step...
k += 1
grms /= math.sqrt( float( obj.size ) )
# print something...
if( k%print_frequency == 0 ):
if( follow_mode < 0 ):
log_function( "%10ld%20.5lf%20.10lf%5ld"%( k, obj.func, grms, nneg ) )
else:
log_function( "%10ld%20.5lf%20.10lf%5ld%5ld%10.2lf"%( k, obj.func, grms, nneg, follow_mode, eiv ) )
obj.current_step( k )
if( k%print_frequency != 0 ):
if( follow_mode < 0 ):
log_function( "%10ld%20.5lf%20.10lf%5ld"%( k, obj.func, grms, nneg ) )
else:
log_function( "%10ld%20.5lf%20.10lf%5ld%5ld%10.2lf"%( k, obj.func, grms, nneg, follow_mode, ei[follow_mode] ) )
if( follow_mode > -1 ):
log_function( "-" * 70 + "\n" )
else:
log_function( "-" * 55 )
def rfo( obj,
step_number = 100,
step_size = 0.1,
print_frequency = 10,
gradient_tolerance = 1.5,
follow_mode = -1, # -1 : minimum / 0... : mode following (TS)
log_function = default_log ):
if( follow_mode >= obj.size or follow_mode < -1 ):
follow_mode = -1
log_function( "---------------------------------------- Minimization (RFO)\n" )
log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
log_function( "Following Mode: %20d"%( follow_mode ) )
log_function( "Step Number: %20d"%( step_number ) )
log_function( "Step Size: %20.10lg"%( step_size ) )
log_function( "Print Frequency: %20d"%( print_frequency ) )
log_function( "Gradient Tolerance: %20.10lg\n"%( gradient_tolerance ) )
log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
log_function( "-" * 50 )
tol2 = 1.0e-8
dx = [ 0.0 for i in range( obj.size ) ]
dd = [ 0.5 for i in range( obj.size ) ]
if( follow_mode > -1 ):
dd[follow_mode] *= -1.0
grms = gradient_tolerance * 2.0
k = 0
flg = True
while( k < step_number and grms > gradient_tolerance and flg ):
# update coordinates
for i in range( obj.size ):
obj.coor[i] -= dx[i]
# get into for the new point
obj.get_hess()
ei, ev = qm3.maths.matrix.diag( obj.hess, obj.size )
# calcualte new step
vg = qm3.maths.matrix.mult( qm3.maths.matrix.T( ev, obj.size, obj.size ), obj.size, obj.size, obj.grad, obj.size, 1 )
lg = [ dd[i] * ( math.fabs( ei[i] ) + math.sqrt( ei[i] * ei[i] + 4.0 * vg[i] * vg[i] ) ) for i in range( obj.size ) ]
# just using lambda, and not the shifted version: vg[i] / ( ei[i] - lg[i] )
# pure newton-raphson step would use only the corresponding eigenvalue (ei[i])
dx = qm3.maths.matrix.mult( ev, obj.size, obj.size, [ vg[i] / lg[i] for i in range( obj.size ) ], obj.size, 1 )
tt = math.sqrt( sum( [ dx[i] * dx[i] for i in range( obj.size ) ] ) )
# checking for a small step
if( tt < tol2 ):
log_function( "\n -- The step size is *very* small..." )
flg = False
# scale long steps...
if( tt > step_size ):
for i in range( obj.size ):
dx[i] *= step_size / tt
# next step...
k += 1
grms = math.sqrt( sum( [ obj.grad[i] * obj.grad[i] for i in range( obj.size ) ] ) / float( obj.size ) )
# print something...
if( k%print_frequency == 0 ):
log_function( "%10ld%20.5lf%20.10lf"%( k, obj.func, grms ) )
obj.current_step( k )
if( k%print_frequency != 0 ):
log_function( "%10ld%20.5lf%20.10lf"%( k, obj.func, grms ) )
log_function( "-" * 50 )
###############################################################################
# Iterable version of the minimizers
#
class stepped_fire( object ):
def __init__( self, obj, step_size = 0.1, print_frequency = 10, log_function = default_log ):
self.obj = obj
self.nstp = 0
self.ssiz = step_size
self.alph = 0.1
self.velo = [ 0.0 for i in range( obj.size ) ]
self.step = [ 0.0 for i in range( obj.size ) ]
self.log_function = log_function
self.print_frequency = print_frequency
self.step_size = step_size
self.log_function( "---------------------------------------- Minimization (FIRE)\n" )
self.log_function( "Degrees of Freedom: %20ld"%( obj.size ) )
self.log_function( "Step Size: %20.10lg"%( step_size ) )
self.log_function( "Print Frequency: %20d\n"%( print_frequency ) )
self.log_function( "%10s%20s%20s"%( "Step", "Function", "Gradient" ) )
self.log_function( "-" * 50 )
self.obj.get_grad()
self.norm = math.sqrt( sum( [ i*i for i in self.obj.grad ] ) )
self.grms = self.norm / math.sqrt( self.obj.size )
self.log_function( "%10s%20.5lf%20.8lf"%( "", self.obj.func, self.grms ) )
self.i = 0
def iterate( self ):
        # power P = - g . v must be positive to keep mixing, matching fire() above
        if( - sum( [ self.velo[j] * self.obj.grad[j] for j in range( self.obj.size ) ] ) > 0.0 ):
vsiz = math.sqrt( sum( [ self.velo[j] * self.velo[j] for j in range( self.obj.size ) ] ) )
self.velo = [ ( 1.0 - self.alph ) * self.velo[j] - self.alph * self.obj.grad[j] / self.norm * vsiz for j in range( self.obj.size ) ]
if( self.nstp > 5 ):
self.ssiz = min( self.ssiz * 1.1, self.step_size )
self.alph *= 0.99
self.nstp += 1
else:
self.alph = 0.1
self.ssiz *= 0.5
self.nstp = 0
self.velo = [ 0.0 for j in range( self.obj.size ) ]
for j in range( self.obj.size ):
self.velo[j] -= self.ssiz * self.obj.grad[j]
self.step[j] = self.ssiz * self.velo[j]
tmp = math.sqrt( sum( [ self.step[j] * self.step[j] for j in range( self.obj.size ) ] ) )
if( tmp > self.ssiz ):
for j in range( self.obj.size ):
self.step[j] *= self.ssiz / tmp
for j in range( self.obj.size ):
self.obj.coor[j] += self.step[j]
self.obj.get_grad()
self.norm = math.sqrt( sum( [ i*i for i in self.obj.grad ] ) )
self.grms = self.norm / math.sqrt( self.obj.size )
self.i += 1
if( self.i%self.print_frequency == 0 ):
self.log_function( "%10d%20.5lf%20.10lf"%( self.i, self.obj.func, self.grms ) )
self.obj.current_step( self.i )
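# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the gradient-based
# minimizers above only rely on an object exposing .size, .coor, .func and
# .grad plus get_func(), get_grad() and current_step(); baker()/rfo() would
# additionally need get_hess(). The 2-D quadratic below is a made-up example.
if( __name__ == "__main__" ):
    class _quadratic( object ):
        def __init__( self ):
            self.size = 2
            self.coor = [ 3.0, -2.0 ]
            self.func = 0.0
            self.grad = [ 0.0, 0.0 ]
        def get_func( self ):
            # f(x, y) = x^2 + y^2, with its minimum at the origin
            self.func = sum( [ i * i for i in self.coor ] )
        def get_grad( self ):
            self.get_func()
            self.grad = [ 2.0 * i for i in self.coor ]
        def current_step( self, step ):
            pass
    _obj = _quadratic()
    l_bfgs( _obj, step_number = 50, step_size = 1.0, gradient_tolerance = 1.0e-4 )
    print( _obj.coor )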
|
StarcoderdataPython
|
12820994
|
"""
Server-Sent Events implementation for streaming.
Based on: https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default
"""
# currently excluded from documentation - see docs/README.md
import re
import time
import urllib3
from ldclient.config import HTTPConfig
from ldclient.impl.http import HTTPFactory
from ldclient.util import log
from ldclient.util import throw_if_unsuccessful_response
# Technically, we should support streams that mix line endings. This regex,
# however, assumes that a system will provide consistent line endings.
end_of_field = re.compile(r'\r\n\r\n|\r\r|\n\n')
class SSEClient:
def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000,
verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs):
self.url = url
self.last_id = last_id
self.retry = retry
self._chunk_size = chunk_size
if http_factory:
self._timeout = http_factory.timeout
base_headers = http_factory.base_headers
else:
# for backward compatibility in case anyone else is using this class
self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout)
base_headers = {}
# Optional support for passing in an HTTP client
if http:
self.http = http
else:
hf = http_factory
if hf is None: # build from individual parameters which we're only retaining for backward compatibility
hc = HTTPConfig(
connect_timeout=connect_timeout,
read_timeout=read_timeout,
disable_ssl_verification=not verify_ssl,
http_proxy=http_proxy
)
hf = HTTPFactory({}, hc)
self.http = hf.create_pool_manager(1, url)
# Any extra kwargs will be fed into the request call later.
self.requests_kwargs = kwargs
# The SSE spec requires making requests with Cache-Control: nocache
if 'headers' not in self.requests_kwargs:
self.requests_kwargs['headers'] = {}
self.requests_kwargs['headers'].update(base_headers)
self.requests_kwargs['headers']['Cache-Control'] = 'no-cache'
# The 'Accept' header is not required, but explicit > implicit
self.requests_kwargs['headers']['Accept'] = 'text/event-stream'
# Keep data here as it streams in
self.buf = u''
self._connect()
def _connect(self):
if self.last_id:
self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id
# Use session if set. Otherwise fall back to requests module.
self.resp = self.http.request(
'GET',
self.url,
timeout=self._timeout,
preload_content=False,
retries=0, # caller is responsible for implementing appropriate retry semantics, e.g. backoff
**self.requests_kwargs)
# Raw readlines doesn't work because we may be missing newline characters until the next chunk
# For some reason, we also need to specify a chunk size because stream=True doesn't seem to guarantee
        # that we get the newlines in a timely manner
self.resp_file = self.resp.stream(amt=self._chunk_size)
# TODO: Ensure we're handling redirects. Might also stick the 'origin'
# attribute on Events like the Javascript spec requires.
throw_if_unsuccessful_response(self.resp)
def _event_complete(self):
return re.search(end_of_field, self.buf[len(self.buf)-self._chunk_size-10:]) is not None # Just search the last chunk plus a bit
def __iter__(self):
return self
def __next__(self):
while not self._event_complete():
try:
nextline = next(self.resp_file)
# There are some bad cases where we don't always get a line: https://github.com/requests/requests/pull/2431
if not nextline:
raise EOFError()
self.buf += nextline.decode("utf-8")
except (StopIteration, EOFError) as e:
if self.retry:
# This retry logic is not what we want in the SDK. It's retained here for backward compatibility in case
# anyone else is using SSEClient.
time.sleep(self.retry / 1000.0)
self._connect()
# The SSE spec only supports resuming from a whole message, so
# if we have half a message we should throw it out.
head, sep, tail = self.buf.rpartition('\n')
self.buf = head + sep
continue
else:
raise
split = re.split(end_of_field, self.buf)
head = split[0]
tail = "".join(split[1:])
self.buf = tail
msg = Event.parse(head)
# If the server requests a specific retry delay, we need to honor it.
if msg.retry:
self.retry = msg.retry
# last_id should only be set if included in the message. It's not
# forgotten if a message omits it.
if msg.id:
self.last_id = msg.id
return msg
class Event:
sse_line_pattern = re.compile('(?P<name>[^:]*):?( ?(?P<value>.*))?')
def __init__(self, data='', event='message', id=None, retry=None):
self.data = data
self.event = event
self.id = id
self.retry = retry
def dump(self):
lines = []
if self.id:
lines.append('id: %s' % self.id)
# Only include an event line if it's not the default already.
if self.event != 'message':
lines.append('event: %s' % self.event)
if self.retry:
lines.append('retry: %s' % self.retry)
lines.extend('data: %s' % d for d in self.data.split('\n'))
return '\n'.join(lines) + '\n\n'
@classmethod
def parse(cls, raw):
"""
Given a possibly-multiline string representing an SSE message, parse it
and return a Event object.
"""
msg = cls()
for line in raw.split('\n'):
m = cls.sse_line_pattern.match(line)
if m is None:
# Malformed line. Discard but warn.
log.warning('Invalid SSE line: "%s"' % line)
continue
name = m.groupdict()['name']
value = m.groupdict()['value']
if name == '':
# line began with a ":", so is a comment. Ignore
continue
if name == 'data':
# If we already have some data, then join to it with a newline.
# Else this is it.
if msg.data:
msg.data = '%s\n%s' % (msg.data, value)
else:
msg.data = value
elif name == 'event':
msg.event = value
elif name == 'id':
msg.id = value
elif name == 'retry':
msg.retry = int(value)
return msg
def __str__(self):
return self.data
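if __name__ == '__main__':
    # Hedged usage sketch (the payload below is made up): parse a raw SSE message
    # and round-trip it through dump().
    raw = 'event: put\nid: 42\ndata: {"path": "/flags/sample-flag"}'
    evt = Event.parse(raw)
    print(evt.event, evt.id, evt.data)
    print(evt.dump())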
|
StarcoderdataPython
|
369377
|
<filename>ipynb.py
# -*- coding: utf-8 -*-
#
# This software is licensed as some open-source license; which is
# yet to be decided.
#
# Author: <NAME> <<EMAIL>>
#
# Trac support for IPython Notebook attachments
#
# Loosely based on the ReST support by
# <NAME>, <NAME>, and <NAME>.
# (@trac/mimeview/rst.py)
#
__docformat__ = 'IPythonNotebook'
from distutils.version import StrictVersion
try:
from IPython import __version__
from IPython import nbconvert
from IPython import nbformat
has_docutils = True
except ImportError:
has_docutils = False
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer
class IPythonNotebookRenderer(Component):
"""HTML renderer for IPython Notebooks;
Requires IPython[notebook] (which requires pandoc)
"""
implements(ISystemInfoProvider, IHTMLPreviewRenderer)
can_render = False
def __init__(self):
self.can_render = True
# ISystemInfoProvider methods
def get_system_info(self):
if has_docutils:
yield 'Docutils', __version__
# IHTMLPreviewRenderer methods
def get_quality_ratio(self, mimetype):
        if self.can_render and mimetype in ('text/x-ipynb',):
return 8
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
try:
node = nbformat.reader.read(content)
result = nbconvert.export_html(node, config=dict(template='basic'))
return """%s""" % str(result[0])
except Exception as e:
return """<h2>Conversion failed</h2><pre>%s</pre>""" % str(e)
|
StarcoderdataPython
|
12856781
|
<reponame>yewzijian/RegTR<gh_stars>10-100
import torch
import torch.nn as nn
from utils.se3_torch import se3_transform_list
_EPS = 1e-6
class CorrCriterion(nn.Module):
"""Correspondence Loss.
"""
def __init__(self, metric='mae'):
super().__init__()
assert metric in ['mse', 'mae']
self.metric = metric
def forward(self, kp_before, kp_warped_pred, pose_gt, overlap_weights=None):
losses = {}
B = pose_gt.shape[0]
kp_warped_gt = se3_transform_list(pose_gt, kp_before)
corr_err = torch.cat(kp_warped_pred, dim=0) - torch.cat(kp_warped_gt, dim=0)
if self.metric == 'mae':
corr_err = torch.sum(torch.abs(corr_err), dim=-1)
elif self.metric == 'mse':
corr_err = torch.sum(torch.square(corr_err), dim=-1)
else:
raise NotImplementedError
if overlap_weights is not None:
overlap_weights = torch.cat(overlap_weights)
mean_err = torch.sum(overlap_weights * corr_err) / torch.clamp_min(torch.sum(overlap_weights), _EPS)
else:
mean_err = torch.mean(corr_err, dim=1)
return mean_err
|
StarcoderdataPython
|
259838
|
<reponame>yuchiu/scraper-practice
try:
from urllib.parse import urlencode
except ImportError:
from urlparse import urlencode # pylint: disable=E0401
import requests # pylint: disable=E0401
from requests.exceptions import RequestException # pylint: disable=E0401
import json
import re
def get_page_index(offset, keyword):
data = {
"offset": offset,
"format": "json",
"keyword": keyword,
"autoload": "true",
"count": 20,
"cur_tab": 1,
"from": "search_tab"
}
url = "https://www.toutiao.com/search_content/?" + urlencode(data)
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
return None
except RequestException:
print('request index failed')
return None
def parse_page_index(html):
data = json.loads(html)
if data and 'data' in data.keys():
for item in data.get('data'):
yield item.get('article_url')
def main():
html = get_page_index(0, "autoshow")
for url in parse_page_index(html):
print(url)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3233111
|
import requests
from dashboard.Image import draw_black, draw_red, get_enhanced_icon, h_red_image, small_font, medium_font
from dashboard.Config import open_weather_map_api_key, lat, lon, units, unit_letter
def print_weather():
weather = get_weather()
icon = get_enhanced_icon(weather['icon_path'], 45, False)
h_red_image.paste(icon, (2, 32))
draw_red.text(
(50, 32),
weather['current_temp'][0:2] + 'º' + unit_letter,
font=medium_font,
fill=0)
draw_red.text(
(50, 52),
weather['description'],
font=medium_font,
fill=0)
draw_black.text(
(112, 32),
'Feels like ' + weather['feels_like'][0:2] + 'º\n' +
'Humidity ' + weather['humidity'][0:2] + '% ',
font=small_font,
fill=0)
draw_black.text(
(202, 32),
'Day ' + weather['max_temp'][0:2] + 'º\n' +
'Night ' + weather['min_temp'][0:2] + 'º',
font=small_font,
fill=0)
def get_weather():
weather_url = 'https://api.openweathermap.org/data/2.5/onecall?lat=' + lat + '&lon=' + lon + '&units=' + units + '&appid=' + open_weather_map_api_key
weather = requests.get(weather_url)
if weather.ok:
weather_dictionary = weather.json()
return {
'description': str(weather_dictionary.get('current').get('weather')[0].get('main')),
'current_temp': str(weather_dictionary.get('current').get('temp')),
'min_temp': str(weather_dictionary.get('daily')[0].get('temp').get('min')),
'max_temp': str(weather_dictionary.get('daily')[0].get('temp').get('max')),
'icon_path': str(
'assets/weather/' + weather_dictionary.get('current').get('weather')[0].get('icon') + '.png'),
'feels_like': str(weather_dictionary.get('current').get('feels_like')),
'humidity': str(weather_dictionary.get('current').get('humidity'))
}
else:
return {}
|
StarcoderdataPython
|
6508163
|
class SilkError(Exception):
pass
class SilkNotConfigured(SilkError):
pass
class SilkInternalInconsistency(SilkError):
pass
|
StarcoderdataPython
|
4883022
|
<filename>Dev/Tests/firmware_checker.py
# firmware_checker.py v_0_4_0
from cyberbot import *
class firmware():
def __init__(self):
bot().send_c(98)
self.v=bot().read_r()
    def version_info(self):
        # The firmware register packs the version as major*10000 + minor*100 + patch.
        p = self.v % 100
        mi = ((self.v % 10000) - p) // 100
        ma = ((self.v % 1000000) - mi * 100 - p) // 10000
        print("Firmware: v%d.%d.%d" % (ma, mi, p))
        display.show("FW:v%d.%d.%d" % (ma, mi, p))
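# Worked example of the decoding above: a register value of 10203 splits into
# p = 3, mi = 2 and ma = 1, i.e. firmware v1.2.3.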
|
StarcoderdataPython
|
139471
|
<gh_stars>100-1000
from distutils.core import setup, Extension
marisa_module = Extension("_marisa",
sources=["marisa-swig_wrap.cxx", "marisa-swig.cxx"],
libraries=["marisa"])
setup(name = "marisa",
ext_modules = [marisa_module],
py_modules = ["marisa"])
|
StarcoderdataPython
|
1961563
|
<gh_stars>1-10
import numpy as np
from scipy.optimize import least_squares
from scipy.integrate import odeint
def sol_u(t, u0, alpha, beta):
return u0*np.exp(-beta*t) + alpha/beta*(1-np.exp(-beta*t))
def sol_s(t, s0, u0, alpha, beta, gamma):
exp_gt = np.exp(-gamma*t)
if beta == gamma:
s = s0*exp_gt + (beta*u0-alpha)*t*exp_gt + alpha/gamma * (1-exp_gt)
else:
s = s0*exp_gt + alpha/gamma * (1-exp_gt) + (alpha - u0*beta)/(gamma-beta) * (exp_gt - np.exp(-beta*t))
return s
def sol_p(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p):
u = sol_u(t, u0, alpha, beta)
s = sol_s(t, s0, u0, alpha, beta, gamma)
exp_gt = np.exp(-gamma_p*t)
p = p0*exp_gt + eta/(gamma_p-gamma)*(s-s0*exp_gt - beta/(gamma_p-beta)*(u-u0*exp_gt-alpha/gamma_p*(1-exp_gt)))
return p, s, u
def sol_ode(x, t, alpha, beta, gamma, eta, gamma_p):
dx = np.zeros(x.shape)
dx[0] = alpha - beta * x[0]
dx[1] = beta * x[0] - gamma * x[1]
dx[2] = eta * x[1] - gamma_p * x[2]
return dx
def sol_num(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p):
sol = odeint(lambda x, t: sol_ode(x, t, alpha, beta, gamma, eta, gamma_p), np.array([u0, s0, p0]), t)
return sol
def fit_gamma_labeling(t, l, mode=None, lbound=None):
t = np.array(t, dtype=float)
l = np.array(l, dtype=float)
if l.ndim == 1:
# l is a vector
n_rep = 1
else:
n_rep = l.shape[0]
t = np.tile(t, n_rep)
l = l.flatten()
# remove low counts based on lbound
if lbound is not None:
t[l<lbound] = np.nan
l[l<lbound] = np.nan
n = np.sum(~np.isnan(t))
tau = t - np.nanmin(t)
tm = np.nanmean(tau)
# prepare y
y = np.log(l)
ym = np.nanmean(y)
# calculate slope
var_t = np.nanmean(tau**2) - tm**2
cov = np.nansum(y * tau) / n - ym * tm
k = cov / var_t
# calculate intercept
b = np.exp(ym - k * tm) if mode != 'fast' else None
gamma = -k
u0 = b
return gamma, u0
def fit_beta_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=None):
tau = t - np.min(t)
l0 = np.mean(l[:, tau == 0])
if beta_0 is None: beta_0 = 1
if fix_l0:
f_lsq = lambda b: (sol_u(tau, l0, 0, b) - l).flatten()
ret = least_squares(f_lsq, beta_0, bounds=bounds)
beta = ret.x
else:
f_lsq = lambda p: (sol_u(tau, p[1], 0, p[0]) - l).flatten()
ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
beta = ret.x[0]
l0 = ret.x[1]
return beta, l0
def fit_alpha_labeling(t, u, gamma, mode=None):
n = u.size
tau = t - np.min(t)
expt = np.exp(gamma*tau)
# prepare x
x = expt - 1
xm = np.mean(x)
# prepare y
y = u * expt
ym = np.mean(y)
# calculate slope
var_x = np.mean(x**2) - xm**2
cov = np.sum(y.dot(x)) / n - ym * xm
k = cov / var_x
# calculate intercept
b = ym - k * xm if mode != 'fast' else None
return k * gamma, b
def fit_alpha_synthesis(t, u, beta, mode=None):
tau = t - np.min(t)
expt = np.exp(-beta*tau)
# prepare x
x = 1 - expt
return beta * np.mean(u) / np.mean(x)
def fit_gamma_splicing(t, s, beta, u0, bounds=(0, np.inf), fix_s0=False):
tau = t - np.min(t)
s0 = np.mean(s[:, tau == 0])
g0 = beta * u0/s0
if fix_s0:
f_lsq = lambda g: (sol_s(tau, s0, u0, 0, beta, g) - s).flatten()
ret = least_squares(f_lsq, g0, bounds=bounds)
gamma = ret.x
else:
f_lsq = lambda p: (sol_s(tau, p[1], u0, 0, beta, p[0]) - s).flatten()
ret = least_squares(f_lsq, np.array([g0, s0]), bounds=bounds)
gamma = ret.x[0]
s0 = ret.x[1]
return gamma, s0
def fit_gamma(u, s):
cov = u.dot(s) / len(u) - np.mean(u) * np.mean(s)
var_s = s.dot(s) / len(s) - np.mean(s)**2
gamma = cov / var_s
return gamma
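if __name__ == "__main__":
    # Hedged usage sketch (synthetic, noiseless data; the parameter values are
    # made up): generate a pure exponential decay with sol_u and check that
    # fit_gamma_labeling recovers the degradation rate and initial level.
    t_demo = np.linspace(0.0, 5.0, 50)
    l_demo = sol_u(t_demo, 1.0, 0.0, 0.5)  # u0 = 1, alpha = 0, decay rate 0.5
    gamma_hat, u0_hat = fit_gamma_labeling(t_demo, l_demo)
    print("recovered gamma ~ %.3f, u0 ~ %.3f" % (gamma_hat, u0_hat))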
|
StarcoderdataPython
|
6545858
|
import argparse
from collections import Counter
import csv
import math
import sys
from time import perf_counter
from cubicasa import Cubicasa, ROOM_TYPES, FIXTURE_TYPES
def get_headers():
headers = [
"path",
"type",
"classes",
"floor_index",
"num_sides",
"area",
"proportion_floor_area",
"perimeter",
"compactness",
"num_adjacent_walls",
"proportion_exterior_walls",
"num_adjacent_railings",
# "proportion_exterior_railings",
"num_adjacent_rooms",
"num_connected_rooms",
"num_doors",
"num_windows",
"num_fixtures",
"open_to",
"door_to",
"contains",
]
for t in ROOM_TYPES:
headers.append("open_to_" + t)
headers.append("door_to_" + t)
for t in FIXTURE_TYPES:
headers.append("contains_" + t)
return headers
def isoperimetric_quotient(polygon):
"""Returns the isoperimetric quotient of the polygon, a measure of its compactness."""
return 4 * math.pi * polygon.area / polygon.length ** 2
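# Worked example: a circle attains the maximum value of 1
# (4*pi*(pi*r^2) / (2*pi*r)^2 = 1), while a unit square gives 4*pi/16 ~= 0.785;
# elongated or irregular rooms score closer to 0.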
def summarize_counter(c):
return ", ".join([label if count == 1 else "{} ({}x)".format(label, count) for label, count in c.items()])
def process(model):
try:
for floor_index, floor in enumerate(model.floors):
floor.find_objects()
floor.find_adjacencies()
floor.find_inside()
floor_area = floor.area()
for room in floor.rooms:
adjacent_rooms = Counter()
for r in room.adjacent_rooms():
adjacent_rooms[r.simple_type] += 1
connected_rooms = Counter()
for r in room.connected_rooms():
type = r.simple_type if r is not None else "Outside"
connected_rooms[type] += 1
fixture_types = Counter()
for fixture in room.fixtures:
fixture_types[fixture.simple_type] += 1
room_area = room.polygon.area
proportion_floor_area = room_area / floor_area if floor_area > 0 else 0
num_walls = len(room.adjacent_walls())
num_exterior_walls = len(room.adjacent_exterior_walls())
proportion_exterior_walls = num_exterior_walls / num_walls if num_walls > 0 else 0
num_railings = len(room.adjacent_railings())
num_exterior_railings = 0
for railing in room.adjacent_railings():
if len(railing.rooms_opposite(room)) == 0:
num_exterior_railings += 1
proportion_exterior_railings = num_exterior_railings / num_railings if num_railings > 0 else 0
data = {
"path": model.path,
"type": room.simple_type,
"classes": room.full_type,
"floor_index": floor_index,
"num_sides": room.num_edges(),
"area": room_area,
"proportion_floor_area": proportion_floor_area,
"perimeter": room.polygon.length,
"compactness": isoperimetric_quotient(room.polygon),
"num_adjacent_walls": num_walls,
"proportion_exterior_walls": proportion_exterior_walls,
"num_adjacent_railings": num_railings,
# "proportion_exterior_railings": proportion_exterior_railings,
"num_adjacent_rooms": sum(adjacent_rooms.values()),
"num_connected_rooms": sum(connected_rooms.values()),
"num_doors": len(room.doors),
"num_windows": len(room.windows),
"num_fixtures": len(room.fixtures),
}
data["open_to"] = summarize_counter(adjacent_rooms)
data["door_to"] = summarize_counter(connected_rooms)
data["contains"] = summarize_counter(fixture_types)
for t in ROOM_TYPES:
data["open_to_"+t] = adjacent_rooms[t]
data["door_to_"+t] = connected_rooms[t]
for t in FIXTURE_TYPES:
data["contains_"+t] = fixture_types[t]
yield data
except Exception as e:
print("Error when processing {}".format(m.path), file=sys.stderr)
raise
parser = argparse.ArgumentParser()
parser.add_argument("basepath", metavar="CUBICASA_PATH", help="The path to the cubicasa5k folder")
parser.add_argument("-l", "--limit", type=int, help="The maximum number of plans to process")
parser.add_argument("-o", "--offset", type=int, help="The number of plans to skip before processing", default=0)
parser.add_argument("-p", "--plan", help="The relative path to a specific plan to process")
args = parser.parse_args()
start_time = perf_counter()
c = Cubicasa(args.basepath)
w = csv.DictWriter(sys.stdout, fieldnames=get_headers())
w.writeheader()
if args.plan is not None:
m = c.get_model(args.plan)
for data in process(m):
w.writerow(data)
else:
for m in c.models(args.limit, args.offset):
for data in process(m):
w.writerow(data)
elapsed = perf_counter() - start_time
minutes = int(elapsed // 60)
seconds = elapsed % 60
print("Completed in {:02d}:{:07.4f}".format(minutes, seconds), file=sys.stderr)
|
StarcoderdataPython
|
4923
|
import tensorflow as tf
@tf.function
def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Covid19(y_true, y_pred, i=2):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Normal(y_true, y_pred, i=3):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg before delegating, since ModelCheckpoint does not accept it.
        self.wait_epoch_warmup = kwargs.pop("wait_epoch_warmup", None)
        super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if self.wait_epoch_warmup:
if (epoch + 1) >= self.wait_epoch_warmup:
super().on_epoch_end(epoch, logs)
else:
self.epochs_since_last_save += 1
print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})")
else:
super().on_epoch_end(epoch, logs)
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
def __init__(self, *args, **kwargs):
self.minimum_epochs = kwargs.get("minimum_epochs", 0)
kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs
super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if epoch >= self.minimum_epochs:
super().on_epoch_end(epoch, logs)
def get_losses():
losses = [tf.keras.losses.BinaryCrossentropy()]
return losses
def get_metrics(single_output_idx, add_normal=False):
metrics = []
if single_output_idx is None: # Multi-label
print("###### Multi-label classification ######")
metrics += [
BinaryAccuracy_Infiltrates,
BinaryAccuracy_Pneumonia,
BinaryAccuracy_Covid19
]
# Add normal class
if add_normal:
metrics.append(BinaryAccuracy_Normal)
else:
print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
metrics = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
]
return metrics
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
istrainable = not freeze_base_model
# Select backbone
if backbone == "resnet50":
from tensorflow.keras.applications.resnet import ResNet50 as TFModel
from tensorflow.keras.applications.resnet import preprocess_input
elif backbone == "resnet50v2":
from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "resnet101v2":
from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "vgg16":
from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
from tensorflow.keras.applications.vgg16 import preprocess_input
elif backbone == "efficientnetb0":
from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
elif backbone == "efficientnetb7":
from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
else:
raise ValueError(f"Unknown backbone: {backbone}")
if ignore_model:
model = None
else:
# Instantiate base model with pre-trained weights
base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
# Freeze base model
# base_model.trainable = istrainable
for layers in base_model.layers:
layers.trainable = istrainable
# Create a new model on top
inputs = base_model.input
x = base_model(inputs)
# Option A
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
# Option B
# x = tf.keras.layers.Flatten(name='flatten')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
# Outputs
outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
model = tf.keras.Model(inputs, outputs)
return model, preprocess_input
def add_tabular_input(model, classes):
# Input1
input1 = model.input
input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")
# Pre-outputs 1x3 + 1x3
output1 = model.output
output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)
# Outputs
x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)
model = tf.keras.Model([input1, input2], output)
return model
def unfreeze_base_model(model, n=None, unfreeze=True):
base_model = model.layers[1].layers
# Select number of layers to unfreeze
idx = 0
if n is not None:
if isinstance(n, int):
idx = n
print(f"Unfreezing {len(base_model) - idx} layers")
elif isinstance(n, float) and 0.0 < n <= 1.0:
idx = int(len(base_model) * n)
print(f"Unfreezing {idx} layers")
else:
raise ValueError("Invalid number of layers")
# We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
for layer in base_model[-idx:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
|
StarcoderdataPython
|
9737010
|
import collections
import logging
import os
import sys
from datarobot_batch_scoring.consts import (WriterQueueMsg, Batch,
ProgressQueueMsg)
from datarobot_batch_scoring.reader import (fast_to_csv_chunk,
slow_to_csv_chunk)
from datarobot_batch_scoring.utils import compress, get_rusage, Worker
logger = logging.getLogger(__name__)
FakeResponse = collections.namedtuple('FakeResponse', 'status_code, text')
class BaseNetworkWorker(Worker):
"""A network worker
Work for the worker is read off of the network_queue; failures are put into
the network_deque from which they are also read and sometimes retried.
Successfully returned responses are put into the writer_queue for
processing by another worker.
    Occasional progress updates are put into the progress_queue for processing
    by another worker.
A properly functioning Worker implementation must read data from the
network_queue until it finds the SENTINEL message, which is the indicator
that no more work will be put onto the network queue. At this point the
worker should finish any remaining work and put a NETWORK_PROGRESS message
into the progress_queue with the following details:
ret : boolean
The return status (success flag) of the process
processed : int
The number of requests processed
retried : int
The number of requests retried (read from the network_deque)
consumed : int
The number of consumed requests from the network_queue
rusage : dict
The resource usage. See the function `get_rusage` in `utils`
In addition, it is helpful for debugging purposes if the worker is diligent
about minding its state transitions. As an instance of `Worker`, any time
the `self.state` attribute is updated, a message is sent to the interface
logging this fact. Valid values for the state are defined in this class in
the `state_names` class attribute.
"""
state_names = {
b"-": "Initial",
b"I": "Idle",
b"e": "PreIdle",
b"E": "PrePreIdle",
b"R": "Doing Requests",
b"F": "Pool is Full",
b"W": "Waiting for Finish",
b"D": "Done"
}
def __init__(self, concurrency, timeout, ui,
network_queue,
network_deque,
writer_queue,
progress_queue,
abort_flag,
network_status,
endpoint,
headers,
user,
api_token,
pred_name,
fast_mode,
max_batch_size,
compression,
verify_ssl):
Worker.__init__(self, network_status)
self.concurrency = concurrency
self.timeout = timeout
self.ui = ui or logger
self.network_queue = network_queue
self.network_deque = network_deque
self.writer_queue = writer_queue
self.progress_queue = progress_queue
self.abort_flag = abort_flag
self.endpoint = endpoint
self.headers = headers
self.user = user
self.api_token = api_token
self.pred_name = pred_name
self.fast_mode = fast_mode
self.max_batch_size = max_batch_size
self.compression = compression
self.verify_ssl = verify_ssl
self._timeout = timeout
self.futures = []
self.concurrency = concurrency
self._executor = None
self.session = None
self.proc = None
self.n_consumed = 0
self.n_retried = 0
self.n_requests = 0
def send_warning_to_ctx(self, batch, message):
self.ui.info('CTX WARNING batch_id {} , '
'message {}'.format(batch.id, message))
self.writer_queue.put((WriterQueueMsg.CTX_WARNING, {
"batch": batch,
"error": message
}))
def send_error_to_ctx(self, batch, message):
self.ui.info('CTX ERROR batch_id {} , '
'message {}'.format(batch.id, message))
self.writer_queue.put((WriterQueueMsg.CTX_ERROR, {
"batch": batch,
"error": message
}))
def split_batch(self, batch):
if self.fast_mode:
chunk_formatter = fast_to_csv_chunk
else:
chunk_formatter = slow_to_csv_chunk
todo = [batch]
while todo:
batch = todo.pop(0)
data = chunk_formatter(batch.data, batch.fieldnames)
starting_size = sys.getsizeof(data)
if starting_size < self.max_batch_size:
if self.compression:
data = compress(data)
self.ui.debug(
'batch {}-{} transmitting {} byte - space savings '
'{}%'.format(batch.id, batch.rows,
sys.getsizeof(data),
'%.2f' % float(1 -
(sys.getsizeof(data) /
starting_size))))
else:
self.ui.debug('batch {}-{} transmitting {} bytes'
''.format(batch.id, batch.rows,
starting_size))
yield (batch, data)
else:
if batch.rows < 2:
msg = ('batch {} is single row but bigger '
'than limit, skipping. We lost {} '
'records'.format(batch.id,
len(batch.data)))
self.ui.error(msg)
self.send_error_to_ctx(batch, msg)
continue
msg = ('batch {}-{} is too long: {} bytes,'
' splitting'.format(batch.id, batch.rows,
len(data)))
self.ui.debug(msg)
self.send_warning_to_ctx(batch, msg)
split_point = int(batch.rows/2)
data1 = batch.data[:split_point]
batch1 = Batch(batch.id, split_point, batch.fieldnames,
data1, batch.rty_cnt)
todo.append(batch1)
data2 = batch.data[split_point:]
batch2 = Batch(batch.id + split_point,
batch.rows - split_point,
batch.fieldnames, data2, batch.rty_cnt)
todo.append(batch2)
todo.sort()
def exit_fast(self, a, b):
self.state = b'D'
os._exit(1)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.proc and self.proc.is_alive():
self.proc.terminate()
def run(self):
"""This is a dummy implementation. It will return immediately."""
self.progress_queue.put((ProgressQueueMsg.NETWORK_DONE, {
"ret": True,
"processed": self.n_requests,
"retried": self.n_retried,
"consumed": self.n_consumed,
"rusage": get_rusage(),
}))
def go(self):
"""This is a dummy implementation. It will return immediately."""
return self.run()
|
StarcoderdataPython
|
11380528
|
from reliability.ALT_probability_plotting import ALT_probability_plot_Normal
from reliability.Datasets import ALT_temperature
import matplotlib.pyplot as plt
ALT_probability_plot_Normal(failures=ALT_temperature().failures, failure_stress=ALT_temperature(
).failure_stresses, right_censored=ALT_temperature().right_censored, right_censored_stress=ALT_temperature().right_censored_stresses)
plt.show()
'''
ALT Normal probability plot results:
stress original mu original sigma new mu common sigma sigma change
40 9098.952677 3203.855879 7764.809302 2258.04215 -29.52%
60 5174.454788 3021.349445 4756.980035 2258.04215 -25.26%
80 1600.177190 1169.695509 1638.730675 2258.04215 +93.05%
Total AICc: 709.5115334757447
Total BIC: 713.4159440639235
'''
|
StarcoderdataPython
|
1880463
|
<reponame>spatric5/robosuite
import numpy as np
import random
from collections import deque
class MemoryBuffer:
def __init__(self, size):
self.buffer = deque(maxlen=size)
self.maxSize = size
self.len = 0
def sample(self, count):
"""
samples a random batch from the replay memory buffer
:param count: batch size
:return: batch (numpy array)
"""
batch = []
count = min(count, self.len)
batch = random.sample(self.buffer, count)
s_arr = np.float32([arr[0] for arr in batch])
a_arr = np.float32([arr[1] for arr in batch])
r_arr = np.float32([arr[2] for arr in batch])
s1_arr = np.float32([arr[3] for arr in batch])
return s_arr, a_arr, r_arr, s1_arr
    def __len__(self):
        # `self.len` (the int set in __init__) shadows a plain `len` method on
        # instances, so expose the current size via the __len__ protocol instead.
        return self.len
def add(self, s, a, r, s1):
"""
adds a particular transaction in the memory buffer
:param s: current state
:param a: action taken
:param r: reward received
:param s1: next state
:return:
"""
transition = (s,a,r,s1)
self.len += 1
if self.len > self.maxSize:
self.len = self.maxSize
self.buffer.append(transition)
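if __name__ == "__main__":
    # Hedged usage sketch with made-up 3-dimensional states and 1-dimensional actions.
    buf = MemoryBuffer(1000)
    for _ in range(32):
        buf.add(np.random.rand(3), np.random.rand(1), np.random.rand(), np.random.rand(3))
    s_batch, a_batch, r_batch, s1_batch = buf.sample(8)
    print(s_batch.shape, a_batch.shape, r_batch.shape, s1_batch.shape)
    print("buffer size:", buf.len)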
|
StarcoderdataPython
|
5132742
|
#!/usr/bin/env python
import os
import json
import torch
import pprint
import argparse
import importlib
import numpy as np
import cv2
import matplotlib
matplotlib.use("Agg")
from config import system_configs
from nnet.py_factory import NetworkFactory
from utils import crop_image, normalize_
from external.nms import soft_nms_with_points as soft_nms
from utils.color_map import colormap
from utils.visualize import vis_mask, vis_octagon, vis_ex, vis_class, vis_bbox
from dextr import Dextr
torch.backends.cudnn.benchmark = False
import time
class_name = [0, 1, 2, 3, 4, 5]
image_ext = ['jpg', 'jpeg', 'png', 'webp','tif']
def parse_args():
parser = argparse.ArgumentParser(description="Demo CornerNet", argument_default='ExtremeNet')
parser.add_argument("--cfg_file", help="config file",
default='ExtremeNet', type=str)
parser.add_argument("--demo", help="demo image path or folders",
default="data/coco/images/test1/", type=str)
parser.add_argument("--model_path",
default='cache/nnet/ExtremeNet/ExtremeNet_70000_64.pkl')
parser.add_argument("--show_mask", action='store_true',
help="Run Deep extreme cut to obtain accurate mask")
args = parser.parse_args()
return args
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def _rescale_ex_pts(detections, ratios, borders, sizes):
xs, ys = detections[..., 5:13:2], detections[..., 6:13:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def _box_inside(box2, box1):
inside = (box2[0] >= box1[0] and box2[1] >= box1[1] and \
box2[2] <= box1[2] and box2[3] <= box1[3])
return inside
def kp_decode(nnet, images, K, kernel=3, aggr_weight=0.1,
scores_thresh=0.1, center_thresh=0.1, debug=False):
detections = nnet.test(
[images], kernel=kernel, aggr_weight=aggr_weight,
scores_thresh=scores_thresh, center_thresh=center_thresh, debug=False)
st = time.time()
print(detections.device)
detections = detections.data.cpu().numpy()
print(detections.shape)
print('detections:{}'.format(time.time() - st))
return detections
# result = open('result.txt', 'w')
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(
system_configs.config_dir, args.cfg_file + ".json")
print("cfg_file: {}".format(cfg_file))
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
print("system config...")
pprint.pprint(system_configs.full)
print("loading parameters: {}".format(args.model_path))
print("building neural network...")
nnet = NetworkFactory(None)
print("loading parameters...")
nnet.load_pretrained_params(args.model_path)
# nnet.cuda()
# device = 'gpu'
# model = nnet.to(device)
nnet.eval_mode()
K = configs["db"]["top_k"]
aggr_weight = configs["db"]["aggr_weight"]
scores_thresh = configs["db"]["scores_thresh"]
center_thresh = configs["db"]["center_thresh"]
suppres_ghost = True
nms_kernel = 3
scales = configs["db"]["test_scales"]
weight_exp = 8
categories = configs["db"]["categories"]
nms_threshold = configs["db"]["nms_threshold"]
max_per_image = configs["db"]["max_per_image"]
nms_algorithm = {
"nms": 0,
"linear_soft_nms": 1,
"exp_soft_nms": 2
}["exp_soft_nms"]
if args.show_mask:
dextr = Dextr()
# print(categories)
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
top_bboxes = {}
if os.path.isdir(args.demo):
image_names = []
ls = os.listdir(args.demo)
for file_name in sorted(ls):
ext = file_name[file_name.rfind('.') + 1:].lower()
if ext in image_ext:
image_names.append(os.path.join(args.demo, file_name))
else:
image_names = [args.demo]
for image_id, image_name in enumerate(image_names):
print('Running ', image_name)
image = cv2.imread(image_name)
start_time = time.time()
height, width = image.shape[0:2]
detections = []
for scale in scales:
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(
resized_image, new_center, [inp_height, inp_width])
resized_image = resized_image / 255.
normalize_(resized_image, mean, std)
# print(1111111)
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [int(height * scale), int(width * scale)]
ratios[0] = [height_ratio, width_ratio]
# print(444444)
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
images = torch.from_numpy(images)
dets = kp_decode(
nnet, images, K, aggr_weight=aggr_weight,
scores_thresh=scores_thresh, center_thresh=center_thresh,
kernel=nms_kernel, debug=True)
dets = dets.reshape(2, -1, 14)
dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
dets[1, :, [5, 7, 9, 11]] = out_width - dets[1, :, [5, 7, 9, 11]]
dets[1, :, [7, 8, 11, 12]] = dets[1, :, [11, 12, 7, 8]].copy()
_rescale_dets(dets, ratios, borders, sizes)
_rescale_ex_pts(dets, ratios, borders, sizes)
dets[:, :, 0:4] /= scale
dets[:, :, 5:13] /= scale
detections.append(dets)
detections = np.concatenate(detections, axis=1)
# print(detections)
# print('-----------------------------')
classes = detections[..., -1]
classes = classes[0]
detections = detections[0]
# print(detections)
# reject detections with negative scores
# print(detections[:, 4].shape)
keep_inds = (detections[:, 4] > 0)
# print(keep_inds)
detections = detections[keep_inds]
classes = classes[keep_inds]
# print(detections)
# print(classes)
top_bboxes[image_id] = {}
for j in range(categories):
keep_inds = (classes == j)
top_bboxes[image_id][j + 1] = \
detections[keep_inds].astype(np.float32)
soft_nms(top_bboxes[image_id][j + 1],
Nt=nms_threshold, method=nms_algorithm)
scores = np.hstack([
top_bboxes[image_id][j][:, 4]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_bboxes[image_id][j][:, 4] >= thresh)
top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]
if suppres_ghost:
for j in range(1, categories + 1):
n = len(top_bboxes[image_id][j])
for k in range(n):
inside_score = 0
if top_bboxes[image_id][j][k, 4] > 0.2:
for t in range(n):
if _box_inside(top_bboxes[image_id][j][t],
top_bboxes[image_id][j][k]):
inside_score += top_bboxes[image_id][j][t, 4]
if inside_score > top_bboxes[image_id][j][k, 4] * 3:
top_bboxes[image_id][j][k, 4] /= 2
infer_time_end = time.time()
infer_time = infer_time_end - start_time
if 1: # visualize
color_list = colormap(rgb=True)
mask_color_id = 0
image = cv2.imread(image_name)
input_image = image.copy()
mask_image = image.copy()
bboxes = {}
# print(categories)
for j in range(1, categories + 1):
# print(top_bboxes)
# print(j)
keep_inds = (top_bboxes[image_id][j][:, 4] > 0.5)
cat_name = class_name[j]
# print(cat_name)
# print(top_bboxes)
for bbox in top_bboxes[image_id][j][keep_inds]:
# print('ssssss', cat_name)
sc = round(bbox[4], 2)
# print('sc:{}'.format(sc))
ex = bbox[5:13].astype(np.int32).reshape(4, 2)
# print('ex:{}'.format(ex))
bbox = bbox[0:4].astype(np.int32)
# print('bbox:{}'.format(bbox))
txt = '{}{:.2f}'.format(cat_name, sc)
# print('txt:{}'.format(txt))
# print(image_name.split('/')[-1])
# result.write(str(image_name.split('/')[-1]))
# result.write(' ' + str(cat_name))
# result.write(' ' + str(sc))
# result.write(' ' + str(ex[0][0]))
# result.write(' ' + str(ex[0][1]))
# result.write(' ' + str(ex[1][0]))
# result.write(' ' + str(ex[1][1]))
# result.write(' ' + str(ex[2][0]))
# result.write(' ' + str(ex[2][1]))
# result.write(' ' + str(ex[3][0]))
# result.write(' ' + str(ex[0][1]))
# result.write('\n')
color_mask = color_list[mask_color_id % len(color_list), :3]
mask_color_id += 1
# image = vis_bbox(image,
# (bbox[0], bbox[1],
# bbox[2] - bbox[0], bbox[3] - bbox[1]))
# image = vis_class(image,
# (bbox[0], bbox[1] - 2), txt)
ex_time_end = time.time()
ex_time = ex_time_end - start_time
# is_vertical = (max(ex[0][0], ex[1][0], ex[2][0], ex[3][0])
# - min(ex[0][0], ex[1][0], ex[2][0], ex[3][0])) < 30
# is_horizontal = (max(ex[0][1], ex[1][1], ex[2][1], ex[3][1])
# - min(ex[0][1], ex[1][1], ex[2][1], ex[3][1])) < 30
# print(is_vertical, is_horizontal)
# if is_vertical or is_horizontal:
# # point1 = (min(ex[0][0], ex[1][0], ex[2][0], ex[3][0]), min(ex[0][1], ex[1][1], ex[2][1], ex[3][1]))
# # point2 = (max(ex[0][0], ex[1][0], ex[2][0], ex[3][0]), max(ex[0][1], ex[1][1], ex[2][1], ex[3][1]))
# pts = np.array([[ex[0][0], ex[0][1]], [ex[1][0], ex[1][1]], [ex[2][0], ex[2][1]], [ex[3][0], ex[3][1]]])
# # contours =
# # cv2.rectangle(image, point1, point2, (0, 128, 0), 1)
# cv2.drawContours(image, np.int32([pts]), -1, (0, 128, 0), 1, cv2.LINE_AA)
# # cv2.polylines(image, np.int32([pts]), True, (0, 128, 0), 1)
# else:
# print(max(ex[0][1], ex[1][1], ex[2][1], ex[3][1]), min(ex[0][1], ex[1][1], ex[2][1], ex[3][1]))
image = vis_octagon(
image, ex, color_mask)
# image = vis_ex(image, ex, color_mask)
if args.show_mask:
mask = dextr.segment(input_image[:, :, ::-1], ex) # BGR to RGB
mask = np.asfortranarray(mask.astype(np.uint8))
mask_image = vis_bbox(mask_image,
(bbox[0], bbox[1],
bbox[2] - bbox[0],
bbox[3] - bbox[1]))
mask_image = vis_class(mask_image,
(bbox[0], bbox[1] - 2), txt)
mask_image = vis_mask(mask_image, mask, color_mask)
# if args.show_mask:
# cv2.imshow('mask', mask_image)
# print(os.path.join('out', image_name.split('/')[-1]))
print("infer_time: {}, get_ex_time: {}".format(infer_time, ex_time))
cv2.imwrite(os.path.join('out', image_name.split('/')[-1]), image)
# cv2.waitKey()
|
StarcoderdataPython
|
358619
|
# -*- coding: utf-8 -*-
''' Client for creating a nicely formatted PDF calendar for a
specified year or the current year (default).
'''
# builtins
import calendar
import sys
from datetime import date, time, timedelta
# 3rd party
from reportlab.lib import colors
from reportlab.lib.pagesizes import inch, letter
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
# custom
import civilite._meta as meta
from civilite.schedule import (EVENT_TYPES, EVT_FIXED, EVT_NEVER_ON,
EVT_SUNSET, getCurrentSchedule)
__version__ = meta.__version__
THIS_YEAR = date.today().year
if len(sys.argv) > 1:
THIS_YEAR = int(sys.argv[1])
def onFirstPage(canvas, _doc):
''' function object for setting up the default document
this implementation does not use the doc parameter
'''
canvas.saveState()
canvas.setTitle(f'House of Prayer - {THIS_YEAR} Parking Lot Lighting Schedule')
canvas.setAuthor('AMV')
canvas.setSubject(f'HoP parking lot lighting schedule for {THIS_YEAR}')
canvas.setKeywords('')
canvas.restoreState()
print(f'Creating the lighting control schedule for year {THIS_YEAR}')
# Occupancy schedule for parking lot lighting
MY_SCHEDULE = getCurrentSchedule()
SUNSET_EVENTS = MY_SCHEDULE.createEvents(THIS_YEAR)
print('Creating template..')
CALENDAR_FILE_NAME = f'lighting_calendar_{THIS_YEAR}.pdf'
CALENDAR_DOCUMENT = SimpleDocTemplate(CALENDAR_FILE_NAME,
pagesize=letter,
leftMargin=0.2*inch,
rightMargin=0.2*inch,
topMargin=0.2*inch,
bottomMargin=0.2*inch)
print('Creating table...')
DOC_COLOR_BLUE = colors.HexColor('#99ccff')
DOC_COLOR_GREEN = colors.HexColor('#ccffcc')
DOC_COLOR_ORANGE = colors.HexColor('#ffcc99')
DOC_COLOR_GRAY_1 = colors.HexColor('#777777')
DOC_COLOR_GRAY_2 = colors.HexColor('#969696')
DOC_COLOR_GRAY_3 = colors.HexColor('#AF9E93')
# cGray3 = colors.HexColor('#677077')
EVENT_COLORS = {EVT_FIXED: DOC_COLOR_GREEN,
EVT_SUNSET: DOC_COLOR_ORANGE,
EVT_NEVER_ON: DOC_COLOR_GRAY_3}
CALENDAR_STYLE = [
# global
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), # all cells
('ALIGN', (1, 0), (-1, -1), 'CENTER'), # day columns
('ALIGN', (0, 0), (0, -1), 'LEFT'), # month column
('ALIGN', (8, 0), (8, -1), 'CENTER'), # sunset time column
# header
('BACKGROUND', (0, 0), (7, 0), DOC_COLOR_GRAY_1),
('TEXTCOLOR', (0, 0), (7, 0), colors.white),
('BACKGROUND', (1, 0), (1, 0), DOC_COLOR_GRAY_2),
('BACKGROUND', (7, 0), (7, 0), DOC_COLOR_GRAY_2),
('BACKGROUND', (8, 0), (8, 0), DOC_COLOR_ORANGE),
]
DOCUMENT_ELEMENTS = []
START_DATE = date(THIS_YEAR, 1, 1)
DATA = [['MONTH', 'Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Sunset']]
CELL_DATE = START_DATE
NUM_PREFIX_COLUMNS = 1
NUM_SUFFIX_COLUMNS = 1
ROW_INDEX = 1
LAST_TIME = None
CURRENT_TIME = None
while CELL_DATE.year == START_DATE.year:
COLUMN_ID = (CELL_DATE.weekday() + 1) % 7 # gives us Sunday first (Mon=0, Sun=6 in Python)
COLUMN_INDEX = COLUMN_ID + NUM_PREFIX_COLUMNS
ROW = [''] * (NUM_PREFIX_COLUMNS + COLUMN_ID)
ROW[0] = ''
for i in range(7 - COLUMN_ID):
ROW.append('%02d' % CELL_DATE.day)
info = SUNSET_EVENTS.get(CELL_DATE)
if CELL_DATE.day == 1:
ROW[0] = calendar.month_name[CELL_DATE.month]
CALENDAR_STYLE.append(('BACKGROUND', (0, ROW_INDEX), (0, ROW_INDEX), DOC_COLOR_BLUE))
CALENDAR_STYLE.append(('BOX', (COLUMN_INDEX, ROW_INDEX),
(COLUMN_INDEX, ROW_INDEX), 1, DOC_COLOR_GRAY_2))
if info is not None:
sunsetTime, eventType, eventChanged = info
if eventType is not None:
CALENDAR_STYLE.append(('BACKGROUND', (COLUMN_INDEX, ROW_INDEX),
(COLUMN_INDEX, ROW_INDEX), EVENT_COLORS[eventType]))
else:
CALENDAR_STYLE.append(('TEXTCOLOR', (COLUMN_INDEX, ROW_INDEX),
(COLUMN_INDEX, ROW_INDEX), colors.lightslategray))
CURRENT_TIME = sunsetTime
if LAST_TIME is not None and (CURRENT_TIME.utcoffset() != LAST_TIME.utcoffset()):
CALENDAR_STYLE.append(('BACKGROUND', (COLUMN_INDEX, ROW_INDEX),
(COLUMN_INDEX, ROW_INDEX), colors.yellow))
LAST_TIME = CURRENT_TIME
COLUMN_INDEX += 1
CELL_DATE += timedelta(days=1)
ROW.append(CURRENT_TIME.time().strftime('%H:%M'))
DATA.append(ROW)
ROW_INDEX += 1
NUM_ROWS = len(DATA)
# sunset column formats
CALENDAR_STYLE.append(('LINEAFTER', (7, 0), (7, -1), 1, colors.black))
CALENDAR_STYLE.append(('GRID', (8, 0), (8, -1), 1, colors.black))
TABLE_1 = Table(DATA, [1.0*inch] + (len(DATA[0])-2)*[0.25*inch] + [None], len(DATA)*[0.19*inch])
TABLE_1.setStyle(TableStyle(CALENDAR_STYLE))
ROW_ADJUST = 21
SCHEDULE_DATA = [
['', '', THIS_YEAR, ''],
['', '', '', ''],
['', '', 'OCCUPANCY SCHEDULE\n(EVENINGS)', ''],
['', 'Weekday', 'Start time', 'End time']]
for i in range(7):
weekDay = (i - 1) % 7 # list Sunday first, just like in calendar table
evt = MY_SCHEDULE.events.get(weekDay)
if evt is not None:
SCHEDULE_DATA.append(['', calendar.day_name[weekDay],
time.strftime(evt.start, '%H:%M'),
time.strftime(evt.stop, '%H:%M')])
SCHEDULE_DATA = SCHEDULE_DATA + (NUM_ROWS-len(SCHEDULE_DATA)-ROW_ADJUST)*[['', '', '', '']]
SCHEDULE_DATA[12] = ['LEGEND', '']
SCHEDULE_DATA[13] = ['01', '%s event' % EVENT_TYPES[EVT_SUNSET]]
SCHEDULE_DATA[14] = ['01', '%s event' % EVENT_TYPES[EVT_FIXED]]
SCHEDULE_DATA[15] = ['01', '%s event' % EVENT_TYPES[EVT_NEVER_ON]]
SCHEDULE_DATA[16] = ['01', 'First day of month']
SCHEDULE_DATA[17] = ['01', 'DST change']
TABLE_2 = Table(SCHEDULE_DATA,
[0.25*inch, 0.95*inch, 0.75*inch, 0.75*inch],
[1.0*inch, 1.0*inch, 0.50*inch] +
(NUM_ROWS-3-ROW_ADJUST)*[0.25*inch],
TableStyle([
# global
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('ALIGN', (2, 0), (2, 2), 'CENTER'),
('ALIGN', (1, 3), (1, -1), 'LEFT'),
('ALIGN', (2, 3), (-1, -1), 'CENTER'),
# year cell
('FONT', (2, 0), (2, 0), 'Times-Bold', 64),
# header
('BACKGROUND', (1, 3), (-1, 3), DOC_COLOR_GRAY_1),
('TEXTCOLOR', (1, 3), (-1, 3), colors.white),
# grid
('GRID', (1, 3), (-1, len(MY_SCHEDULE.events)+3), 1, colors.black),
# legend
('ALIGN', (0, 12), (-1, -1), 'LEFT'),
('ALIGN', (0, 13), (0, -1), 'CENTER'),
('BACKGROUND', (0, 13), (0, 13), EVENT_COLORS[EVT_SUNSET]),
('BACKGROUND', (0, 14), (0, 14), EVENT_COLORS[EVT_FIXED]),
('BACKGROUND', (0, 15), (0, 15),
EVENT_COLORS[EVT_NEVER_ON]),
('BOX', (0, 16), (0, 16), 1, DOC_COLOR_GRAY_2),
('BACKGROUND', (0, 17), (0, 17), colors.yellow),
])
)
TABLE_MAIN = Table([[TABLE_2, TABLE_1]])
DOCUMENT_ELEMENTS.append(TABLE_MAIN)
# write the document to disk
CALENDAR_DOCUMENT.build(DOCUMENT_ELEMENTS, onFirstPage=onFirstPage)
print(f'Calendar {CALENDAR_FILE_NAME} created.')
|
StarcoderdataPython
|
3455510
|
import numpy as np
import scipy.linalg
from itertools import permutations, combinations_with_replacement
from termcolor import colored
import warnings
from desc.backend import jnp, put
from desc.utils import issorted, isalmostequal, islinspaced
from desc.io import IOAble
class Transform(IOAble):
"""Transforms from spectral coefficients to real space values.
Parameters
----------
grid : Grid
Collocation grid of real space coordinates
basis : Basis
Spectral basis of modes
derivs : int or array-like
* if an int, order of derivatives needed (default=0)
* if an array, derivative orders specified explicitly. Shape should be (N,3),
where each row is one set of partial derivatives [dr, dt, dz]
rcond : float
relative cutoff for singular values for inverse fitting
build : bool
whether to precompute the transforms now or do it later
build_pinv : bool
whether to precompute the pseudoinverse now or do it later
    method : {``'auto'``, ``'fft'``, ``'direct1'``, ``'direct2'``}
* ``'fft'`` uses fast fourier transforms in the zeta direction, and so must have
equally spaced toroidal nodes, and the same node pattern on each zeta plane
* ``'direct1'`` uses full matrices and can handle arbitrary node patterns and
spectral bases.
* ``'direct2'`` uses a DFT instead of FFT that can be faster in practice
* ``'auto'`` selects the method based on the grid and basis resolution
"""
_io_attrs_ = ["_grid", "_basis", "_derivatives", "_rcond", "_method"]
def __init__(
self,
grid,
basis,
derivs=0,
rcond="auto",
build=True,
build_pinv=False,
method="auto",
):
self._grid = grid
self._basis = basis
self._rcond = rcond if rcond is not None else "auto"
self._derivatives = self._get_derivatives(derivs)
self._sort_derivatives()
self._method = method
self._built = False
self._built_pinv = False
self._set_up()
if build:
self.build()
if build_pinv:
self.build_pinv()
def _set_up(self):
self.method = self._method
self._matrices = {
"direct1": {
i: {j: {k: {} for k in range(4)} for j in range(4)} for i in range(4)
},
"fft": {i: {j: {} for j in range(4)} for i in range(4)},
"direct2": {i: {} for i in range(4)},
}
def _get_derivatives(self, derivs):
"""Get array of derivatives needed for calculating objective function.
Parameters
----------
derivs : int or string
order of derivatives needed, if an int (Default = 0)
OR
array of derivative orders, shape (N,3)
[dr, dt, dz]
Returns
-------
derivatives : ndarray
combinations of derivatives needed
Each row is one set, columns represent the order of derivatives
for [rho, theta, zeta]
"""
if isinstance(derivs, int) and derivs >= 0:
derivatives = np.array([[]])
combos = combinations_with_replacement(range(derivs + 1), 3)
for combo in list(combos):
perms = set(permutations(combo))
for perm in list(perms):
if derivatives.shape[1] == 3:
derivatives = np.vstack([derivatives, np.array(perm)])
else:
derivatives = np.array([perm])
derivatives = derivatives[
derivatives.sum(axis=1) <= derivs
] # remove higher orders
elif np.atleast_1d(derivs).ndim == 1 and len(derivs) == 3:
derivatives = np.asarray(derivs).reshape((1, 3))
elif np.atleast_2d(derivs).ndim == 2 and np.atleast_2d(derivs).shape[1] == 3:
derivatives = np.atleast_2d(derivs)
else:
raise NotImplementedError(
colored(
"derivs should be array-like with 3 columns, or a non-negative int",
"red",
)
)
return derivatives
def _sort_derivatives(self):
"""Sort derivatives."""
sort_idx = np.lexsort(
(self.derivatives[:, 0], self.derivatives[:, 1], self.derivatives[:, 2])
)
self._derivatives = self.derivatives[sort_idx]
def _check_inputs_fft(self, grid, basis):
"""Check that inputs are formatted correctly for fft method."""
        if grid.num_nodes == 0 or basis.num_modes == 0:
            # trivial case where we just return all zeros, so it doesn't matter
            self._method = "fft"
            return
zeta_vals, zeta_cts = np.unique(grid.nodes[:, 2], return_counts=True)
if not isalmostequal(zeta_cts):
warnings.warn(
colored(
"fft method requires the same number of nodes on each zeta plane, "
+ "falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if not isalmostequal(
grid.nodes[:, :2].T.reshape((2, zeta_cts[0], -1), order="F")
):
warnings.warn(
colored(
"fft method requires that node pattern is the same on each zeta "
+ "plane, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
id2 = np.lexsort((basis.modes[:, 1], basis.modes[:, 0], basis.modes[:, 2]))
if not issorted(id2):
warnings.warn(
colored(
"fft method requires zernike indices to be sorted by toroidal mode "
+ "number, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if (
len(zeta_vals) > 1
and not abs((zeta_vals[-1] + zeta_vals[1]) * basis.NFP - 2 * np.pi) < 1e-14
):
warnings.warn(
colored(
"fft method requires that nodes complete 1 full field period, "
+ "falling back to direct2 method",
"yellow",
)
)
self.method = "direct2"
return
n_vals, n_cts = np.unique(basis.modes[:, 2], return_counts=True)
if len(n_vals) > 1 and not islinspaced(n_vals):
warnings.warn(
colored(
"fft method requires the toroidal modes are equally spaced in n, "
+ "falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if len(zeta_vals) < len(n_vals):
warnings.warn(
colored(
"fft method can not undersample in zeta, "
+ "num_toroidal_modes={}, num_toroidal_angles={}, ".format(
len(n_vals), len(zeta_vals)
)
+ "falling back to direct2 method",
"yellow",
)
)
self.method = "direct2"
return
if len(zeta_vals) % 2 == 0:
warnings.warn(
colored(
"fft method requires an odd number of toroidal nodes, "
+ "falling back to direct2 method",
"yellow",
)
)
self.method = "direct2"
return
if not issorted(grid.nodes[:, 2]):
warnings.warn(
colored(
"fft method requires nodes to be sorted by toroidal angle in "
+ "ascending order, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if len(zeta_vals) > 1 and not islinspaced(zeta_vals):
warnings.warn(
colored(
"fft method requires nodes to be equally spaced in zeta, "
+ "falling back to direct2 method",
"yellow",
)
)
self.method = "direct2"
return
self._method = "fft"
self.lm_modes = np.unique(basis.modes[:, :2], axis=0)
self.num_lm_modes = self.lm_modes.shape[0] # number of radial/poloidal modes
self.num_n_modes = 2 * basis.N + 1 # number of toroidal modes
self.num_z_nodes = len(zeta_vals) # number of zeta nodes
self.N = basis.N # toroidal resolution of basis
self.pad_dim = (self.num_z_nodes - 1) // 2 - self.N
self.dk = basis.NFP * np.arange(-self.N, self.N + 1).reshape((1, -1))
self.fft_index = np.zeros((basis.num_modes,), dtype=int)
offset = np.min(basis.modes[:, 2]) + basis.N # N for sym="cos", 0 otherwise
for k in range(basis.num_modes):
row = np.where((basis.modes[k, :2] == self.lm_modes).all(axis=1))[0]
col = np.where(basis.modes[k, 2] == n_vals)[0]
self.fft_index[k] = self.num_n_modes * row + col + offset
self.fft_nodes = np.hstack(
[
grid.nodes[:, :2][: grid.num_nodes // self.num_z_nodes],
np.zeros((grid.num_nodes // self.num_z_nodes, 1)),
]
)
def _check_inputs_direct2(self, grid, basis):
"""Check that inputs are formatted correctly for direct2 method."""
if grid.num_nodes == 0 or basis.num_modes == 0:
# trivial case where we just return all zeros, so it doesn't matter
self._method = "direct2"
return
zeta_vals, zeta_cts = np.unique(grid.nodes[:, 2], return_counts=True)
if not issorted(grid.nodes[:, 2]):
warnings.warn(
colored(
"direct2 method requires nodes to be sorted by toroidal angle in "
+ "ascending order, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if not isalmostequal(zeta_cts):
warnings.warn(
colored(
"direct2 method requires the same number of nodes on each zeta "
+ "plane, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
if len(zeta_vals) > 1 and not isalmostequal(
grid.nodes[:, :2].T.reshape((2, zeta_cts[0], -1), order="F")
):
warnings.warn(
colored(
"direct2 method requires that node pattern is the same on each "
+ "zeta plane, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
id2 = np.lexsort((basis.modes[:, 1], basis.modes[:, 0], basis.modes[:, 2]))
if not issorted(id2):
warnings.warn(
colored(
"direct2 method requires zernike indices to be sorted by toroidal "
+ "mode number, falling back to direct1 method",
"yellow",
)
)
self.method = "direct1"
return
n_vals, n_cts = np.unique(basis.modes[:, 2], return_counts=True)
self._method = "direct2"
self.lm_modes = np.unique(basis.modes[:, :2], axis=0)
self.n_modes = n_vals
self.zeta_nodes = zeta_vals
self.num_lm_modes = self.lm_modes.shape[0] # number of radial/poloidal modes
self.num_n_modes = self.n_modes.size # number of toroidal modes
self.num_z_nodes = len(zeta_vals) # number of zeta nodes
self.N = basis.N # toroidal resolution of basis
self.fft_index = np.zeros((basis.num_modes,), dtype=int)
for k in range(basis.num_modes):
row = np.where((basis.modes[k, :2] == self.lm_modes).all(axis=1))[0]
col = np.where(basis.modes[k, 2] == n_vals)[0]
self.fft_index[k] = self.num_n_modes * row + col
self.fft_nodes = np.hstack(
[
grid.nodes[:, :2][: grid.num_nodes // self.num_z_nodes],
np.zeros((grid.num_nodes // self.num_z_nodes, 1)),
]
)
self.dft_nodes = np.hstack(
[np.zeros((self.zeta_nodes.size, 2)), self.zeta_nodes[:, np.newaxis]]
)
def build(self):
"""Build the transform matrices for each derivative order."""
if self.built:
return
if self.basis.num_modes == 0:
self._built = True
return
if self.method == "direct1":
for d in self.derivatives:
self._matrices["direct1"][d[0]][d[1]][d[2]] = self.basis.evaluate(
self.grid.nodes, d, unique=True
)
if self.method in ["fft", "direct2"]:
temp_d = np.hstack(
[self.derivatives[:, :2], np.zeros((len(self.derivatives), 1))]
)
temp_modes = np.hstack([self.lm_modes, np.zeros((self.num_lm_modes, 1))])
for d in temp_d:
self.matrices["fft"][d[0]][d[1]] = self.basis.evaluate(
self.fft_nodes, d, modes=temp_modes, unique=True
)
if self.method == "direct2":
temp_d = np.hstack(
[np.zeros((len(self.derivatives), 2)), self.derivatives[:, 2:]]
)
temp_modes = np.hstack(
[np.zeros((self.num_n_modes, 2)), self.n_modes[:, np.newaxis]]
)
for d in temp_d:
self.matrices["direct2"][d[2]] = self.basis.evaluate(
self.dft_nodes, d, modes=temp_modes, unique=True
)
self._built = True
def build_pinv(self):
"""Build the pseudoinverse for fitting."""
if self.built_pinv:
return
A = self.basis.evaluate(self.grid.nodes, np.array([0, 0, 0]))
# for weighted least squares
A = self.grid.weights[:, np.newaxis] * A
rcond = None if self.rcond == "auto" else self.rcond
if A.size:
self._matrices["pinv"] = scipy.linalg.pinv(A, rcond=rcond)
else:
self._matrices["pinv"] = np.zeros_like(A.T)
self._built_pinv = True
def transform(self, c, dr=0, dt=0, dz=0):
"""Transform from spectral domain to physical.
Parameters
----------
c : ndarray, shape(num_coeffs,)
spectral coefficients, indexed to correspond to the spectral basis
dr : int
order of radial derivative
dt : int
order of poloidal derivative
dz : int
order of toroidal derivative
Returns
-------
x : ndarray, shape(num_nodes,)
array of values of function at node locations
"""
if not self.built:
raise RuntimeError(
"Transform must be precomputed with transform.build() before being used"
)
if self.basis.num_modes != c.size:
raise ValueError(
colored(
"Coefficients dimension ({}) is incompatible with ".format(c.size)
+ "the number of basis modes({})".format(self.basis.num_modes),
"red",
)
)
if len(c) == 0:
return np.zeros(self.grid.num_nodes)
if self.method == "direct1":
A = self.matrices["direct1"][dr][dt][dz]
if isinstance(A, dict):
raise ValueError(
colored("Derivative orders are out of initialized bounds", "red")
)
return jnp.matmul(A, c)
elif self.method == "direct2":
A = self.matrices["fft"][dr][dt]
B = self.matrices["direct2"][dz]
if isinstance(A, dict) or isinstance(B, dict):
raise ValueError(
colored("Derivative orders are out of initialized bounds", "red")
)
c_mtrx = jnp.zeros((self.num_lm_modes * self.num_n_modes,))
c_mtrx = put(c_mtrx, self.fft_index, c).reshape((-1, self.num_n_modes))
cc = jnp.matmul(A, c_mtrx)
return jnp.matmul(cc, B.T).flatten(order="F")
elif self.method == "fft":
A = self.matrices["fft"][dr][dt]
if isinstance(A, dict):
raise ValueError(
colored("Derivative orders are out of initialized bounds", "red")
)
# reshape coefficients
c_mtrx = jnp.zeros((self.num_lm_modes * self.num_n_modes,))
c_mtrx = put(c_mtrx, self.fft_index, c).reshape((-1, self.num_n_modes))
# differentiate
c_diff = c_mtrx[:, :: (-1) ** dz] * self.dk ** dz * (-1) ** (dz > 1)
# re-format in complex notation
c_real = jnp.pad(
(self.num_z_nodes / 2)
* (c_diff[:, self.N + 1 :] - 1j * c_diff[:, self.N - 1 :: -1]),
((0, 0), (0, self.pad_dim)),
mode="constant",
)
c_cplx = jnp.hstack(
(
self.num_z_nodes * c_diff[:, self.N, jnp.newaxis],
c_real,
jnp.fliplr(jnp.conj(c_real)),
)
)
# transform coefficients
c_fft = jnp.real(jnp.fft.ifft(c_cplx))
return jnp.matmul(A, c_fft).flatten(order="F")
def fit(self, x):
"""Transform from physical domain to spectral using weighted least squares fit.
Parameters
----------
x : ndarray, shape(num_nodes,)
values in real space at coordinates specified by grid
Returns
-------
c : ndarray, shape(num_coeffs,)
spectral coefficients in basis
"""
if not self.built_pinv:
raise RuntimeError(
"Transform must be precomputed with transform.build_pinv() before being used"
)
if x.ndim > 1:
weights = self.grid.weights.reshape((-1, 1))
else:
weights = self.grid.weights
return jnp.matmul(self.matrices["pinv"], weights * x)
def project(self, y):
"""Project vector y onto basis.
Equivalent to dotting the transpose of the transform matrix into y, but
somewhat more efficient in some cases by using FFT instead of full transform
Parameters
----------
y : ndarray
vector to project. Should be of size (self.grid.num_nodes,)
Returns
-------
b : ndarray
vector y projected onto basis, shape (self.basis.num_modes)
"""
if not self.built:
raise RuntimeError(
"Transform must be precomputed with transform.build() before being used"
)
if self.grid.num_nodes != y.size:
raise ValueError(
colored(
"y dimension ({}) is incompatible with ".format(y.size)
+ "the number of grid nodes({})".format(self.grid.num_nodes),
"red",
)
)
if self.method == "direct1":
A = self.matrices["direct1"][0][0][0]
return jnp.matmul(A.T, y)
elif self.method == "direct2":
A = self.matrices["fft"][0][0]
B = self.matrices["direct2"][0]
yy = jnp.matmul(A.T, y.reshape((-1, self.num_z_nodes), order="F"))
return jnp.matmul(yy, B).flatten()[self.fft_index]
elif self.method == "fft":
A = self.matrices["fft"][0][0]
# this was derived by trial and error, but seems to work correctly
# there might be a more efficient way...
a = jnp.fft.fft(A.T @ y.reshape((A.shape[0], -1), order="F"))
cdn = a[:, 0]
cr = a[:, 1 : 1 + self.N]
b = jnp.hstack(
[-cr.imag[:, ::-1], cdn.real[:, np.newaxis], cr.real]
).flatten()[self.fft_index]
return b
def change_resolution(
self, grid=None, basis=None, build=True, build_pinv=False, method="auto"
):
"""Re-build the matrices with a new grid and basis.
Parameters
----------
grid : Grid
Collocation grid of real space coordinates
basis : Basis
Spectral basis of modes
build : bool
whether to recompute matrices now or wait until requested
method : {"auto", "direct1", "direct2", "fft"}
method to use for computing transforms
"""
if grid is None:
grid = self.grid
if basis is None:
basis = self.basis
if not self.grid.eq(grid):
self._grid = grid
self._built = False
self._built_pinv = False
if not self.basis.eq(basis):
self._basis = basis
self._built = False
self._built_pinv = False
self.method = method
if build:
self.build()
if build_pinv:
self.build_pinv()
@property
def grid(self):
"""Grid : collocation grid for the transform."""
return self.__dict__.setdefault("_grid", None)
@grid.setter
def grid(self, grid):
if not self.grid.eq(grid):
self._grid = grid
if self.method == "fft":
self._check_inputs_fft(self.grid, self.basis)
if self.method == "direct2":
self._check_inputs_direct2(self.grid, self.basis)
if self.built:
self._built = False
self.build()
if self.built_pinv:
self._built_pinv = False
self.build_pinv()
@property
def basis(self):
"""Basis : spectral basis for the transform."""
return self.__dict__.setdefault("_basis", None)
@basis.setter
def basis(self, basis):
if not self.basis.eq(basis):
self._basis = basis
if self.method == "fft":
self._check_inputs_fft(self.grid, self.basis)
if self.method == "direct2":
self._check_inputs_direct2(self.grid, self.basis)
if self.built:
self._built = False
self.build()
if self.built_pinv:
self._built_pinv = False
self.build_pinv()
@property
def derivatives(self):
"""Set of derivatives the transform can compute.
Returns
-------
derivatives : ndarray
combinations of derivatives needed
Each row is one set, columns represent the order of derivatives
for [rho, theta, zeta]
"""
return self._derivatives
def change_derivatives(self, derivs, build=True):
"""Change the order and updates the matrices accordingly.
Doesn't delete any old orders, only adds new ones if not already there
Parameters
----------
derivs : int or array-like
* if an int, order of derivatives needed (default=0)
* if an array, derivative orders specified explicitly.
shape should be (N,3), where each row is one set of partial derivatives
[dr, dt, dz]
build : bool
whether to build transforms immediately or wait
"""
new_derivatives = self._get_derivatives(derivs)
new_not_in_old = (new_derivatives[:, None] == self.derivatives).all(-1).any(-1)
derivs_to_add = new_derivatives[~new_not_in_old]
self._derivatives = np.vstack([self.derivatives, derivs_to_add])
self._sort_derivatives()
if len(derivs_to_add):
# if we actually added derivatives and didn't build them, then its not built
self._built = False
if build:
# we don't update self._built here because it is still built from before
# but it still might have unbuilt matrices from new derivatives
self.build()
@property
def matrices(self):
"""dict of ndarray : transform matrices such that x=A*c."""
return self.__dict__.setdefault(
"_matrices",
{
"direct1": {
i: {j: {k: {} for k in range(4)} for j in range(4)}
for i in range(4)
},
"fft": {i: {j: {} for j in range(4)} for i in range(4)},
"direct2": {i: {} for i in range(4)},
},
)
@property
def num_nodes(self):
"""int : number of nodes in the collocation grid."""
return self.grid.num_nodes
@property
def num_modes(self):
"""int : number of modes in the spectral basis."""
return self.basis.num_modes
    @property
    def modes(self):
        """ndarray: spectral mode numbers."""
        return self.basis.modes
    @property
    def nodes(self):
        """ndarray: collocation nodes."""
        return self.grid.nodes
@property
def built(self):
"""bool : whether the transform matrices have been built."""
return self.__dict__.setdefault("_built", False)
@property
def built_pinv(self):
"""bool : whether the pseudoinverse matrix has been built."""
return self.__dict__.setdefault("_built_pinv", False)
@property
def rcond(self):
"""float: reciprocal condition number for inverse transform."""
return self.__dict__.setdefault("_rcond", "auto")
@property
def method(self):
"""{``'direct1'``, ``'direct2'``, ``'fft'``}: method of computing transform."""
return self.__dict__.setdefault("_method", "direct1")
@method.setter
def method(self, method):
old_method = self.method
if method == "auto" and self.basis.N == 0:
self.method = "direct1"
elif method == "auto":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.method = "fft"
elif method == "fft":
self._check_inputs_fft(self.grid, self.basis)
elif method == "direct2":
self._check_inputs_direct2(self.grid, self.basis)
elif method == "direct1":
self._method = "direct1"
else:
raise ValueError("Unknown transform method: {}".format(method))
if self.method != old_method:
self._built = False
def __repr__(self):
"""String form of the object."""
return (
type(self).__name__
+ " at "
+ str(hex(id(self)))
+ " (method={}, basis={}, grid={})".format(
self.method, repr(self.basis), repr(self.grid)
)
)
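# A minimal usage sketch for Transform (not part of the original file): it assumes
# the caller already has desc `grid` and `basis` objects plus a coefficient vector
# `c` whose length matches basis.num_modes; only the API defined above is used.
def _transform_roundtrip_example(grid, basis, c):
    """Build a Transform, evaluate coefficients on the grid, then refit them."""
    t = Transform(grid, basis, derivs=0, build=True, build_pinv=True, method="auto")
    x = t.transform(c)   # spectral coefficients -> values at grid nodes
    c_fit = t.fit(x)     # weighted least-squares fit back to spectral space
    return x, c_fit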
|
StarcoderdataPython
|
9716016
|
<reponame>helix84/activae
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
import sys
import re
import Util
# The workflows for each of the pages can be configured easily. For each page,
# a dictionary below maps the user's profile to its target route.
SCRIPT = {
    # Permission management
    'PageACL':
    {'__default__': '/', # By default, no access is granted
'admin': '/admin/acl',
'director': '/acl/general',
'editor': '/acl/general',
'guionista': '/acl/general',
'redactor': '/acl/general',
'documentalista': '/acl/general',
'script': '/acl/general',
'disenador': '/acl/general',
'reportero': '/acl/general',
'efectos': '/acl/general',
'montador': '/acl/general',
},
    # General administration
    'PageAdmin':
    {'__default__': '/', # By default, no access is granted
     'admin': '/admin/general',
     },
    # Asset format management
    'PageAdminFormats':
    {'__default__': '/', # By default, no access is granted
     'admin': '/admin/format/general',
     },
    # License management
    'PageAdminLicenses':
    {'__default__': '/', # By default, no access is granted
'admin': '/admin/license/general',
},
'PageAdminProfiles':
    {'__default__': '/', # By default, no access is granted
     'admin': '/admin/profile/general',
     },
    'PageAdminTypes':
    {'__default__': '/', # By default, no access is granted
     'admin': '/admin/type/general',
     },
    'PageAdminUsers':
    # Each user can modify some of their own data
    {'__default__': '/admin/user/%(user_id)d',
     'admin': '/admin/user/general',
     },
    'PageAsset':
    {'__default__': '/', # By default, no access is granted
'admin': '/asset/general',
'director': '/asset/general',
'productor': '/asset/publish',
'realizador': '/asset/publish',
'editor': '/asset/general',
'guionista': '/asset/general',
'redactor': '/asset/general',
'documentalista': '/asset/general',
'script': '/asset/general',
'disenador': '/asset/general',
'reportero': '/asset/general',
'efectos': '/asset/general',
'montador': '/asset/general',
},
'PageAssetCreate':
    {'__default__': '/asset', # By default, whatever PageAsset says
     },
    'PageCollection':
    {'__default__': '/', # By default, no access is granted
'admin': '/collection/general',
'director': '/collection/general',
'editor': '/collection/general',
'guionista': '/collection/general',
'redactor': '/collection/general',
'documentalista': '/collection/general',
'script': '/collection/general',
'disenador': '/collection/general',
'reportero': '/collection/general',
'efectos': '/collection/general',
'montador': '/collection/general',
},
    # Home page
    'PageIndex':
    {'__default__': '/index', # By default, the index
'admin': '/admin',
},
'PageLookup':
{'__default__': '/lookup/general',
},
'PageReport':
{'__default__': '/report/assets',
'admin': '/report/general',
},
'PageTranscode':
    {'__default__': '/', # By default, no access is granted
'admin': '/transcode/%(param)s',
'director': '/transcode/%(param)s',
'productor': '/transcode/%(param)s',
'realizador': '/transcode/%(param)s',
'guionista': '/transcode/%(param)s',
},
'PageUpload':
    {'__default__': '/', # By default, no access is granted
'admin': '/asset/upload/new',
'director': '/asset/upload/new',
'editor': '/asset/upload/new',
'guionista': '/asset/upload/new',
'redactor': '/asset/upload/new',
'documentalista': '/asset/upload/new',
'script': '/asset/upload/new',
'disenador': '/asset/upload/new',
'reportero': '/asset/upload/new',
'efectos': '/asset/upload/new',
'montador': '/asset/upload/new',
},
}
"""
Variables usable in the routes:
-------------------------------
%(request)s       incoming url
%(profile_id)d    id of the user's profile
%(profile_name)d  name of the user's profile
%(user_id)d       id of the user
%(user_name)s     name of the user
%(param)s         parameters (from the last '/' onwards)
Profiles:
---------
{
'__default__': '/', # Target for the default profile
'admin': '',
'director': '',
'productor': '',
'realizador': '',
'editor': '',
'regidor': '',
'guionista': '',
'redactor': '',
'documentalista': '',
'script': '',
'disenador': '',
'reportero': '',
'efectos': '',
'montador': '',
'actor': '',
'presentador': '',
},
Targets:
--------
PageACL
'/acl/general/?' , default_user
'/acl/asset/\d+' , edit_acl
'/acl/collection/\d+' , edit_acl
'/admin/acl/?' , default_admin
PageAdmin
'/admin/?' , init
'/admingeneral/?' , default
PageAdminFormats
'/admin/format/general' , default
'/admin/format/del/.+' , del_format
'/admin/format/edit/.+' , edit_format
'/admin/format/edit/del/\d+/\d+$' , del_format_target
PageAdminLicenses
'/admin/license/general', default
'/admin/license/del/.+' , del_license
    '/admin/license/new$' , new_license
'/admin/license/edit/.+', edit_license
PageAdminProfiles
'/admin/profile/general', default
'/admin/profile/.+' , edit_profile
'/admin/profile/del/.+' , del_profile
'/admin/profile/new' , new_profile
PageAdminTypes
'/admin/type/general' , default
'/admin/type/del/.+' , del_type
'/admin/type/new$' , new_type
'/admin/type/edit/.+' , edit_type
PageAdminUsers
'/admin/user/general' , default
'/admin/user/\d+' , edit_user
'/admin/user/new' , new_user
'/admin/user/del/\d+' , del_user
PageAsset
/asset/general' , default
/asset/del/.+' , del_asset
/asset/new/?$' , add_asset
/asset/new/name=(.+?/ref=(.+?' , add_asset
/asset/evolve/.+' , evolve_asset
/asset/edit/?$' , edit_asset_page
/asset/publish/?$' , publish_asset
PageAssetCreate
'/asset/create/?' , init)
'/asset/new/?' , add_asset)
'/asset/new/name=(.+)?/ref=(.+)?', add_asset
'/asset/evolve/.+' , evolve_asset
PageCollection
'/collection/general' , default
'/collection/new$' , add_collection
'/collection/edit/.+' , edit_collection
'/collection/meta/\d+$', show_meta
PageIndex
'/index' , default
'/' , init
PageLookup
'/lookup/general' , default
'/lookup/?.+' , lookup_perform
PageReport
'/report/?' , default
'/report/assets' , report_assets
'/report/system' , report_system
PageTranscode
'/transcode/\d+/?' , transcode
PageUpload
'/asset/upload/?' , UploadNew
'/asset/upload/new/?' , UploadNew
'/asset/upload/evolve/\d+/?' , UploadEvolve
"""
def get_pages ():
txt = open(__file__).read()
supported = SCRIPT.keys()
documented = re.findall("\n(Page.+?)\n", txt)
existing = Util.get_all_pages()
return (supported, documented, existing)
def test():
    supported, documented, existing = get_pages()
    print('#1 Pages with workflow support: %d' % len(supported))
    print('#2 Documented pages: %d' % len(documented))
    print('#3 Total pages: %d' % len(existing))
    not_documented = list(set(supported) - set(documented))
    assert len(not_documented) == 0
    print('#4 Undocumented pages (0): OK')
    assert len(supported) == len(documented)
    print('#5 All pages with workflow support are documented: OK')
    not_supported = list(set(existing) - set(supported))
    print('#6 Pages without workflow (%d): %s' % (len(not_supported), not_supported))
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
279342
|
<reponame>GuzalBulatova/sktime<gh_stars>0
# -*- coding: utf-8 -*-
"""SummaryClassifier test code."""
import numpy as np
from numpy import testing
from sklearn.ensemble import RandomForestClassifier
from sktime.classification.feature_based import SummaryClassifier
from sktime.datasets import load_basic_motions, load_unit_test
def test_summary_classifier_on_unit_test_data():
"""Test of SummaryClassifier on unit test data."""
# load unit test data
X_train, y_train = load_unit_test(split="train", return_X_y=True)
X_test, y_test = load_unit_test(split="test", return_X_y=True)
indices = np.random.RandomState(0).choice(len(y_train), 10, replace=False)
# train summary stat classifier
sc = SummaryClassifier(
random_state=0, estimator=RandomForestClassifier(n_estimators=10)
)
sc.fit(X_train, y_train)
# assert probabilities are the same
probas = sc.predict_proba(X_test.iloc[indices])
testing.assert_array_almost_equal(
probas, summary_classifier_unit_test_probas, decimal=2
)
def test_summary_classifier_on_basic_motions():
"""Test of SummaryClassifier on basic motions."""
# load basic motions data
X_train, y_train = load_basic_motions(split="train", return_X_y=True)
X_test, y_test = load_basic_motions(split="test", return_X_y=True)
indices = np.random.RandomState(4).choice(len(y_train), 10, replace=False)
# train summary stat classifier
sc = SummaryClassifier(
random_state=0, estimator=RandomForestClassifier(n_estimators=10)
)
sc.fit(X_train.iloc[indices], y_train[indices])
# assert probabilities are the same
probas = sc.predict_proba(X_test.iloc[indices])
testing.assert_array_almost_equal(
probas, summary_classifier_basic_motions_probas, decimal=2
)
summary_classifier_unit_test_probas = np.array(
[
[
0.0,
1.0,
],
[
0.9,
0.1,
],
[
0.0,
1.0,
],
[
0.9,
0.1,
],
[
0.9,
0.1,
],
[
1.0,
0.0,
],
[
0.8,
0.2,
],
[
0.6,
0.4,
],
[
0.9,
0.1,
],
[
1.0,
0.0,
],
]
)
summary_classifier_basic_motions_probas = np.array(
[
[
0.0,
0.0,
0.3,
0.7,
],
[
0.5,
0.2,
0.1,
0.2,
],
[
0.0,
0.0,
0.8,
0.2,
],
[
0.0,
1.0,
0.0,
0.0,
],
[
0.1,
0.1,
0.2,
0.6,
],
[
0.0,
0.0,
0.3,
0.7,
],
[
0.5,
0.2,
0.1,
0.2,
],
[
0.0,
0.0,
0.8,
0.2,
],
[
0.1,
0.9,
0.0,
0.0,
],
[
0.1,
0.9,
0.0,
0.0,
],
]
)
# def print_array(array):
# print("[")
# for sub_array in array:
# print("[")
# for value in sub_array:
# print(value.astype(str), end="")
# print(", ")
# print("],")
# print("]")
#
#
# if __name__ == "__main__":
# X_train, y_train = load_unit_test(split="train", return_X_y=True)
# X_test, y_test = load_unit_test(split="test", return_X_y=True)
# indices = np.random.RandomState(0).choice(len(y_train), 10, replace=False)
#
# sc_u = SummaryClassifier(
# random_state=0,
# estimator=RandomForestClassifier(n_estimators=10),
# )
#
# sc_u.fit(X_train, y_train)
# probas = sc_u.predict_proba(X_test.iloc[indices])
# print_array(probas)
#
# X_train, y_train = load_basic_motions(split="train", return_X_y=True)
# X_test, y_test = load_basic_motions(split="test", return_X_y=True)
# indices = np.random.RandomState(4).choice(len(y_train), 10, replace=False)
#
# sc_m = SummaryClassifier(
# random_state=0,
# estimator=RandomForestClassifier(n_estimators=10),
# )
#
# sc_m.fit(X_train.iloc[indices], y_train[indices])
# probas = sc_m.predict_proba(X_test.iloc[indices])
# print_array(probas)
|
StarcoderdataPython
|
3503585
|
<filename>XrayDataPlots/plotDstarsMeasurability.py<gh_stars>0
# Description: Generate the list of dstars and measurability as lists for plotting by matplotlib.
# Source: NA
"""
from iotbx.reflection_file_reader import any_reflection_file
hkl_file = any_reflection_file("${1:3hz7}.mtz")
miller_arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
Iobs = miller_arrays[1]
# Set up the bins
n_bins = 50
binner = Iobs.setup_binner(n_bins=n_bins)
# binner.show_summary()
used = list(binner.range_used())
selections = [binner.selection(i) for i in used]
# make d_centers for the x-axis
d_star_power = 1.618034
centers = binner.bin_centers(d_star_power)
d_centers = list(centers**(-1 / d_star_power))
# make list of the measurabilities by resolution bin
meas = [Iobs.select(sel).measurability() for sel in selections]
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["savefig.dpi"] = 600
mpl.rcParams["figure.dpi"] = 600
fig, ax = plt.subplots(figsize=[3.25, 2.])
ax.scatter(d_centers, meas, c="k", alpha=0.3, s=5.5)
ax.set_xlim(8, 1.5)  # decreasing resolution (d-spacing)
ax.set_xlabel(r"$d^*$ in $\AA$", fontsize=12)
ax.set_ylabel("Measurability", fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
ax.grid(False)
plt.savefig("${1:3hz7}measureability.pdf",bbox_inches="tight")
plt.show()
"""
from iotbx.reflection_file_reader import any_reflection_file
hkl_file = any_reflection_file("3hz7.mtz")
miller_arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
Iobs = miller_arrays[1]
# Set up the bins
n_bins = 50
binner = Iobs.setup_binner(n_bins=n_bins)
# binner.show_summary()
used = list(binner.range_used())
selections = [binner.selection(i) for i in used]
# make d_centers for the x-axis
d_star_power = 1.618034
centers = binner.bin_centers(d_star_power)
d_centers = list(centers**(-1 / d_star_power))
# make list of the measurabilities by resolution bin
meas = [Iobs.select(sel).measurability() for sel in selections]
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["savefig.dpi"] = 600
mpl.rcParams["figure.dpi"] = 600
fig, ax = plt.subplots(figsize=[3.25, 2.])
ax.scatter(d_centers, meas, c="k", alpha=0.3, s=5.5)
ax.set_xlim(8, 1.5)  # decreasing resolution (d-spacing)
ax.set_xlabel(r"$d^*$ in $\AA$", fontsize=12)
ax.set_ylabel("Measurability", fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
ax.grid(False)
plt.savefig("3hz7measureability.pdf",bbox_inches="tight")
plt.show()
|
StarcoderdataPython
|
11308355
|
<gh_stars>0
""" 068 - Faça um programa que jogue par ou ímpar com o computador. O jogo só será interrompido quando o jogador
perder, mostrando o total de vitórias consecutivas que ele conquistou no final do jogo. """
from random import randint
print('=-='*11)
print('VAMOS JOGAR PAR OU ÍMPAR'.center(33))
print('=-='*11)
v = 0
while True:
c = randint(0, 10)
j = int(input('Diga um valor: '))
pi = str(input('Par ou Ímpar [P/I]? ')).strip().upper()[0]
s = c + j
print('-'*33)
print(f'''Você jogou {j} e o computador, {c}.
Total = {s} → ''', end='')
if s % 2 == 0:
r = 'P'
print('PAR')
else:
r = 'I'
print('ÍMPAR')
print('-'*33)
if pi == r:
print('''Você VENCEU!
Vamos jogar novamente...''')
print('=-=' * 11)
v += 1
else:
print('Você PERDEU.')
print('=-=' * 11)
break
print(f'GAME OVER!', end=' ')
if v == 0:
print('Você não venceu nenhuma!')
elif v == 1:
print('Você só venceu uma vez...')
else:
print(f'Você venceu {v} vezes.')
|
StarcoderdataPython
|
58001
|
def incrementing_time(start=2000, increment=1):
while True:
yield start
start += increment
def monotonic_time(start=2000):
return incrementing_time(start, increment=0.000001)
def static_time(value):
while True:
yield value
|
StarcoderdataPython
|
3219714
|
<reponame>tucan9389/mobile-pose-estimation-for-TF2
# Copyright 2019 <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
def get_check_pointer_callback(model_path, output_name):
checkpoint_path = os.path.join(model_path, output_name + ".hdf5") # ".ckpt"
check_pointer_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=False,
verbose=1)
return check_pointer_callback
def get_tensorboard_callback(log_path, output_name):
log_path = os.path.join(log_path, output_name)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_path, histogram_freq=0, write_graph=True,
write_images=True)
return tensorboard_callback
def get_img_tensorboard_callback(log_path, output_name, images, labels, model):
# tensorboard image
def _show_image_for_debugging(numpy_img):
from PIL import Image
# import io
# height, width, channel = numpy_img.shape
img = Image.fromarray(numpy_img)
img.save('my.png')
img.show()
file_writer = tf.summary.create_file_writer(os.path.join(log_path, output_name))
def log_tensorboard_predicted_images(epoch, logs):
# Use the model to predict the values from the validation dataset.
# batch_size = 6
# images, labels = dataloader_valid.get_images(80, batch_size)
predictions = model.predict(images)
# Log the confusion matrix as an image summary.
from data_loader.pose_image_processor import PoseImageProcessor
# summary_str = []
predicted_images = []
for i in range(images.shape[0]):
image = images[i, :, :, :]
label = labels[i, :, :, :]
prediction = predictions[i, :, :, :]
numpy_img = PoseImageProcessor.display_image(image, true_heat=label, pred_heat=prediction, as_numpy=True)
numpy_img = numpy_img / 255
predicted_images.append(numpy_img)
with file_writer.as_default():
# Don't forget to reshape.
tf.summary.image("predict from validation dataset", predicted_images, max_outputs=10, step=epoch)
# Define the per-epoch callback.
img_tensorboard_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=log_tensorboard_predicted_images)
return img_tensorboard_callback
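# A rough sketch of how these helpers might be wired into a Keras training loop.
# The model/dataset arguments and the "outputs/" and "logs/" paths are assumptions
# for illustration, not values from the original project.
def _fit_with_callbacks_example(model, train_ds, valid_images, valid_labels):
    callbacks = [
        get_check_pointer_callback(model_path="outputs/", output_name="pose_model"),
        get_tensorboard_callback(log_path="logs/", output_name="pose_model"),
        get_img_tensorboard_callback(log_path="logs/", output_name="pose_model_imgs",
                                     images=valid_images, labels=valid_labels, model=model),
    ]
    return model.fit(train_ds, epochs=1, callbacks=callbacks)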
|
StarcoderdataPython
|
12862013
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Ant_Algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Zjt1SInhoaFEqSmsPjEfWQE7jhugAvZA
# **ANT ALGORITHM BY GROUP 9**
1. <NAME> - 18081010002
2. <NAME> - 18081010013
3. <NAME> - 18081010033
4. <NAME> - 18081010036
5. <NAME> - 18081010126
# **1. Import Libraries**
"""
#**********************************IMPORT LIBRARIES*******************************
# Library for mathematical operations
import math
# Library for building and manipulating all kinds of graphs and networks
import networkx as nx
# Library for plotting and visualization
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from pylab import *
# Library for numerical computing
import numpy as np
# Library for high-level data analysis and manipulation
import pandas as pn
# Library for measuring execution time
from time import time
"""# **2. Read Data**"""
read_jarak_antarkota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/jarak_antarkota.xlsx')
read_kota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/kota.xlsx')
arr_kota = np.array(read_kota)
arr_jarak_antarkota = np.array(read_jarak_antarkota)
# Map plot
def Grafik_Map(simpul,sudut):
plt.style.use('ggplot')
fig = plt.figure(figsize=(25.200,15))
x = simpul[:,0]
y = simpul[:,1]
    # Draw the lines
    plt.plot( x, y, '--',x, y, 'p',color='#FF8000',  # line color
             markersize=35,  # marker size
             linewidth=2,
             markerfacecolor='#00E4B6',  # marker (city) color
             markeredgecolor='#00FF00',  # marker edge color
             markeredgewidth=2)  # marker edge width
plt.title("KOTA")
legend(("Jalur","Kota"), prop = {'size': 30}, loc='lower right')
plt.grid(True)
#plt.tight_layout()
for i in range (len(simpul)):
        # City identifier (the number of each city)
        plt.annotate("Kota"+str(i+1),
                     size=8,
                     xy=simpul[i],
                     horizontalalignment='center',
                     verticalalignment='center')  # box around the annotation: bbox = dict(facecolor='none', edgecolor='black', boxstyle='round, pad=1')
Grafik_Map(arr_kota,arr_jarak_antarkota)
"""# **3. Implementasi Algoritma Ant**
1. Transisi status, Pembaruan Feromon Lokal, Pembaruan Feromon Global
"""
import random
class Rute_Cepat_ACO:
    # Sub class Tepi = edge object that stores node a, node b, the distance between a and b, and the connection pheromone
class Tepi:
def __init__(self, a, b, jarak_ab, feromon_koneksi):
self.a = a
self.b = b
self.jarak_ab = jarak_ab
self.feromon_koneksi = feromon_koneksi
    # Sub class Semut (the ant)
class Semut:
def __init__(self, alpha, beta, num_simpul, tepi):
self.alpha = alpha
self.beta = beta
self.num_simpul = num_simpul
self.tepi = tepi
self.survei = None
self.jarak_tempuh = 0.0
        # Method to select the next node to visit, validating that that node has not been visited yet.
        def _select_simpul(self):
            persebaran = 0.0  # start the roulette total from zero, then pick the path at random
            k_simpul_sepi = [node for node in range(self.num_simpul) if node not in self.jalur]  # unvisited nodes
            heuristic_total = 0.0  # running total of path distances
for simpul_sepi in k_simpul_sepi:
heuristic_total += self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab
for simpul_sepi in k_simpul_sepi:
persebaran += pow(self.tepi[self.jalur[-1]][simpul_sepi].feromon_koneksi, self.alpha) * \
pow((heuristic_total / self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab), self.beta)
nilai_random = random.uniform(0.0, persebaran)
pos_sebar = 0.0
for simpul_sepi in k_simpul_sepi:
pos_sebar += pow(self.tepi[self.jalur[-1]][simpul_sepi].feromon_koneksi, self.alpha) * \
pow((heuristic_total / self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab), self.beta)
if pos_sebar >= nilai_random:
return simpul_sepi
        #Method that constructs a tour by choosing nodes one by one
def survei_jalur(self):
self.jalur = [random.randint(0, self.num_simpul - 1)]
while len(self.jalur) < self.num_simpul:
self.jalur.append(self._select_simpul())
return self.jalur
        #Distance from one node to the next along the tour
def get_jarak_tempuh(self):
self.jarak_tempuh = 0.0
for i in range(self.num_simpul):
self.jarak_tempuh += self.tepi[self.jalur[i]][self.jalur[(i + 1) % self.num_simpul]].jarak_ab
return self.jarak_tempuh
    # Attribute definitions for Rute_Cepat_ACO objects
def __init__(self, mode='ACS', jumlah_semut=10, alpha=1.0, beta=3.0, rho=0.1,
feromon_tersimpan=1.0, feromon_koneksi=1.0, langkah=100, v_simpul=None, m_jarak=None, posting=None):
self.mode = mode
self.jumlah_semut = jumlah_semut
self.rho = rho
self.feromon_tersimpan = feromon_tersimpan
self.langkah = langkah
self.num_simpul = len(v_simpul)
self.v_simpul = v_simpul
if posting is not None:
self.posting = posting
else:
self.posting = range(1, self.num_simpul + 1)
        #Declare an empty edge list
        self.tepi = [[None] * self.num_simpul for _ in range(self.num_simpul)]
        #Create an edge object for every pair i = a, j = b and store it in the edge list; the result is an n*n array, with n = number of nodes
for i in range(self.num_simpul):
for j in range(self.num_simpul):
                self.tepi[i][j] = self.tepi[j][i] = self.Tepi(i, j, m_jarak[i][j],feromon_koneksi) #Store the edge (Tepi) object in the edge list
        self.semut = [self.Semut(alpha, beta, self.num_simpul, self.tepi) for _ in range(self.jumlah_semut)] #Create the ants as an array of Semut class objects
        self.jalur_terbaik = None #Best path attribute
        self.jarak_terbaik = float("inf") #Attribute for the minimum distance
    #Method that adds pheromone along a path
def _add_feromon(self, jalur, jarak_ab, weight=1.0):
add_feromon = self.feromon_tersimpan / jarak_ab
for i in range(self.num_simpul):
            self.tepi[jalur[i]][jalur[(i + 1) % self.num_simpul]].feromon_koneksi += weight * add_feromon
    #Routing cycle for every ant
def _acs(self):
for k_langkah in range(self.langkah):
for k_semut in self.semut:
self._add_feromon(k_semut.survei_jalur(), k_semut.get_jarak_tempuh())
if k_semut.jarak_tempuh < self.jarak_terbaik:
self.jalur_terbaik = k_semut.jalur
self.jarak_terbaik = k_semut.jarak_tempuh
for i in range(self.num_simpul):
for j in range(i + 1, self.num_simpul):
self.tepi[i][j].feromon_koneksi *= (1.0 - self.rho)
    #Method invoked when running the class
def run(self):
self._acs()
    #Plot the result
def plot(self, line_width=1, point_radius=math.sqrt(2.0), annotation_size=10, dpi=120, save=True, name=None):
fig = plt.figure(figsize=(25.200,15))
x = [self.v_simpul[i][0] for i in self.jalur_terbaik]
x.append(x[0])
y = [self.v_simpul[i][1] for i in self.jalur_terbaik]
y.append(y[0])
        #Draw the lines
        plt.plot( x, y, '--',x, y, 'p',color='#FF8000',#Line color
            markersize=35, #Marker size
            linewidth=2,
            markerfacecolor='#00E4B6',#Marker fill color
            markeredgecolor='#00FF00',#Marker edge color
            markeredgewidth=2)#Marker edge width
        #Create legend patches to place at the top right of the figure
handle1 = mpatches.Patch(color='white', label='Semut: '+str(self.jumlah_semut))
handle2 = mpatches.Patch(color='white', label='Langkah: '+str(self.langkah))
handle3 = mpatches.Patch(color='white', label='Rho: '+str(self.rho))
ax = plt.gca().add_artist(plt.legend(handles=[handle1,handle2,handle3],prop = {'size': 12}))
        #Result
handle4 = mpatches.Patch(color='white', label='Jarak tempuh: '+str(round(self.jarak_terbaik, 2)))
ax = plt.gca().add_artist(plt.legend(handles=[handle4],prop = {'size': 12},loc='lower left'))
        #Chart data
plt.title(" Perutean ACS - "+self.mode)
legend(("Jalur","Kota"), prop = {'size': 30}, loc='lower right')
plt.grid(True)
        #City identifier (number of each city)
for i in self.jalur_terbaik:
plt.annotate("Kota"+str(i+1),
size=8,
xy=self.v_simpul[i],
horizontalalignment='center',
                verticalalignment='center') #Box around the annotation: bbox = dict(facecolor='none', edgecolor='black', boxstyle='round, pad=1')
plt.show()
return self.jarak_terbaik
"""2. Konfigurasi perutean"""
#Mendefinisikan fungsi untuk mengirim konfigurasi yang berbeda secara teratur
def config(tipe, ts, lg, t_evap):
acs = Rute_Cepat_ACO(mode=tipe, jumlah_semut=ts, langkah=lg, v_simpul=arr_kota, m_jarak=arr_jarak_antarkota, rho=t_evap)
acs.run()
jarak_jalur_akhir = acs.plot()
return jarak_jalur_akhir
#The different configurations are defined below
txt_config = [] #Configuration label
jumlah_semut = [] #Colony size
langkah = [] #Total number of steps
rho = [] #Pheromone evaporation rate, between 0 and 1
txt_config.append('Konfigurasi 1'); jumlah_semut.append(50); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 2'); jumlah_semut.append(100); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 3'); jumlah_semut.append(250); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 4'); jumlah_semut.append(50); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 5'); jumlah_semut.append(90); langkah.append(40); rho.append(0.5);
txt_config.append('Konfigurasi 6'); jumlah_semut.append(150); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 7'); jumlah_semut.append(50); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 8'); jumlah_semut.append(200); langkah.append(90); rho.append(0.1);
txt_config.append('Konfigurasi 9'); jumlah_semut.append(150); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 10'); jumlah_semut.append(80); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 11'); jumlah_semut.append(100); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 12'); jumlah_semut.append(150); langkah.append(100); rho.append(0.5);
jarak_ab = [] #Final tour distance for each configuration
tempo = [] #Algorithm execution time for each configuration
for i in range(len(txt_config)):
start_time = time()
jarak_ab.append(config(txt_config[i], jumlah_semut[i], langkah[i], rho[i]))
tempo.append(time()-start_time)
"""3. Pemilihan Hasil Terbaik"""
#Grafik hasil tiga rute terbaik berdasarkan jarak
index1=jarak_ab.index(sorted(jarak_ab,reverse=False)[0])
index2=jarak_ab.index(sorted(jarak_ab,reverse=False)[1])
index3=jarak_ab.index(sorted(jarak_ab,reverse=False)[2])
if index2==index1:
index2=index2+1
if index2==index3:
index3=index3+1
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),sorted(jarak_ab,reverse=False)[0:3], edgecolor='#93329F', color='#5D87B6')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])-1, max(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jarak")
plt.ylabel('Jarak tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jarak)\n\n')
plt.show()
#Chart of the three best routes by execution time
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(tempo[index1],tempo[index2],tempo[index3]), edgecolor='#282623', color='#138d90')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(tempo[index1],tempo[index2],tempo[index3])-1, max(tempo[index1],tempo[index2],tempo[index3])+10)
plt.title("Hasil konfigurasi terbaik berdasarkan waktu")
plt.ylabel('Waktu tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (waktu)\n\n')
plt.show()
#Chart of the three best routes by number of steps
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(langkah[index1],langkah[index2],langkah[index3]), edgecolor='#F387FF', color='#0D3E00')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(langkah[index1],langkah[index2],langkah[index3])-1, max(langkah[index1],langkah[index2],langkah[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jalur")
plt.ylabel('Jalur tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jalur)\n\n')
plt.show()
|
StarcoderdataPython
|
11328497
|
#!/usr/bin/env python3
from io import BytesIO
import ipywidgets as widgets
class _pre():
def __init__(self, value=''):
self.widget = widgets.HTML()
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
self.widget.value = '<pre>{}</pre>'.format(value)
class _img():
def __init__(self, value=None):
self.widget = widgets.Image(format='png')
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
if value is None:
return
data = BytesIO()
value.savefig(data, format='png', facecolor=self.value.get_facecolor())
data.seek(0)
self.widget.value = data.read()
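# Minimal usage sketch (our addition; assumes a Jupyter notebook with matplotlib available):
#   from IPython.display import display
#   import matplotlib.pyplot as plt
#   p = _pre('hello world'); display(p.widget)   # preformatted text widget
#   fig, ax = plt.subplots(); ax.plot([0, 1], [0, 1])
#   im = _img(fig); display(im.widget)           # the figure rendered into an Image widget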
|
StarcoderdataPython
|
170547
|
<filename>dashboard/migrations/0049_auto_20210902_0254.py
# Generated by Django 3.2.5 on 2021-09-02 02:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0048_auto_20210831_0719'),
]
operations = [
migrations.AddField(
model_name='participantcount',
name='year',
field=models.CharField(db_index=True, max_length=4, null=True, verbose_name='Tahun'),
),
migrations.AlterField(
model_name='participantcount',
name='count',
field=models.CharField(db_index=True, help_text='Mulai dari angka berapa, untuk nomor pendaftaran.', max_length=10, verbose_name='Hitungan'),
),
]
|
StarcoderdataPython
|
1787733
|
<reponame>DdOtzen/espCarStuff
"""
Blynk is a platform with iOS and Android apps to control
Arduino, Raspberry Pi and the likes over the Internet.
You can easily build graphic interfaces for all your
projects by simply dragging and dropping widgets.
Downloads, docs, tutorials: http://www.blynk.cc
Sketch generator: http://examples.blynk.cc
Blynk community: http://community.blynk.cc
Social networks: http://www.fb.com/blynkapp
http://twitter.com/blynk_app
This example shows how to initialize your ESP8266/ESP32 board
and connect it to Blynk.
Don't forget to change WIFI_SSID, WIFI_PASS and BLYNK_AUTH ;)
"""
import BlynkLib
import network
import machine
#WIFI_SSID = 'Maolin'
#WIFI_PASS = '<PASSWORD>'
WIFI_SSID = 'AlsLUG'
WIFI_PASS = '<PASSWORD>'
BLYNK_AUTH = '<KEY>'
print("Connecting to WiFi...")
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
wifi.connect(WIFI_SSID, WIFI_PASS)
while not wifi.isconnected():
pass
print('IP:', wifi.ifconfig()[0])
print("Connecting to Blynk...")
blynk = BlynkLib.Blynk(BLYNK_AUTH)
motor_H = machine.Pin(2, machine.Pin.OUT)
motor_H.off()
motorPwm_H = machine.PWM( motor_H, freq=1000, duty=0 )
dir_H = machine.Pin(4, machine.Pin.OUT)
motor_V = machine.Pin(16, machine.Pin.OUT)
motorPwm_V = machine.PWM( motor_V, freq=1000, duty=0 )
dir_V = machine.Pin(17, machine.Pin.OUT)
speed = 0
direction = 0
@blynk.on("connected")
def blynk_connected(ping):
print('Blynk ready. Ping:', ping, 'ms')
def runLoop():
motor_H.off()
motor_V.off()
while True:
blynk.run()
machine.idle()
def limit( min, n, max):
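    # Clamp n to the closed interval [min, max]: sorting the three values puts the clamped result in the middle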
return sorted([min, n, max])[1]
def UpdateSpeed():
print('Speed: {} direction: {}'.format( speed, direction ) )
speed_H = limit( -1023, speed - direction, 1023 )
speed_V = limit( -1023, speed + direction, 1023 )
    print('Speed_V: {} Speed_H: {}'.format( speed_V, speed_H ) )
if speed_V < 0:
dir_V.on()
motorPwm_V.duty( 1023 + speed_V )
else:
dir_V.off()
motorPwm_V.duty( speed_V )
if speed_H < 0:
dir_H.on()
        motorPwm_H.duty( 1023 + speed_H )
else:
dir_H.off()
motorPwm_H.duty( speed_H )
# Register Virtual Pins
@blynk.VIRTUAL_WRITE(0)
def my_write_handler(value):
global speed
speed= int(value[0] )
# print('Current V0 value: {}'.format( intVal ) )
@blynk.VIRTUAL_WRITE(1)
def my_write_handler(value):
global direction
direction = int(value[0] )
# print('Current V1 value: {}'.format( intVal ) )
UpdateSpeed()
# Run blynk in the main thread:
runLoop()
# Or, run blynk in a separate thread (unavailable for esp8266):
#import _thread
#_thread.stack_size(5*1024)
#_thread.start_new_thread(runLoop, ())
# Note:
# Threads are currently unavailable on some devices like esp8266
# ESP32_psRAM_LoBo has a bit different thread API:
# _thread.start_new_thread("Blynk", runLoop, ())
|
StarcoderdataPython
|
9645136
|
<reponame>Erfi/dorathewordexplorer<gh_stars>0
"""darvag URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from django.contrib.auth import views as auth_views
from flashcard import views as flashcard_views
from tags import views as tag_views
from accounts import views as account_views
urlpatterns = [
path('', flashcard_views.home, name='home'),
path('signup/', account_views.signup, name='signup'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('reset/', auth_views.PasswordResetView.as_view(
template_name='password_reset.html',
email_template_name='password_reset_email.html',
subject_template_name='password_reset_subject.txt'),
name='password_reset'),
path('reset/done/', auth_views.PasswordResetDoneView.as_view(
template_name='password_reset_done.html'),
name='password_reset_done'),
re_path(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'),
name='password_reset_confirm'),
path('reset/complete/', auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_complete.html'),
name='password_reset_complete'),
path('settings/password/', auth_views.PasswordChangeView.as_view(template_name='password_change.html'),
name='password_change'),
path('settings/password/done', auth_views.PasswordChangeDoneView.as_view(template_name='password_change_done.html'),
name='password_change_done'),
path('admin/', admin.site.urls),
path('user/dashboard/', flashcard_views.DeckListView.as_view(), name='dashboard'),
path('deck/add/', flashcard_views.DeckCreateView.as_view(), name='add_deck'),
path('deck/<int:deck_id>/', flashcard_views.EntryListView.as_view(), name='view_deck'),
path('deck/<int:deck_id>/edit/', flashcard_views.DeckUpdateView.as_view(), name='edit_deck'),
path('deck/<int:deck_id>/delete/', flashcard_views.DeckDeleteView.as_view(), name='delete_deck'),
path('deck/<int:deck_id>/entry/add/', flashcard_views.EntryCreateView.as_view(), name='add_entry'),
path('deck/<int:deck_id>/entry/<int:entry_id>/edit/', flashcard_views.EntryUpdateView.as_view(), name='edit_entry'),
path('deck/<int:deck_id>/entry/<int:entry_id>/delete/', flashcard_views.EntryDeleteView.as_view(),
name='delete_entry'),
path('tags/', tag_views.TagListView.as_view(), name='list_tags'),
path('tags/create/', tag_views.TagCreateView.as_view(), name='create_tag'),
path('tags/<int:tag_id>/edit', tag_views.TagUpdateView.as_view(), name='update_tag'),
path('tags/<int:tag_id>/delete', tag_views.TagDeleteView.as_view(), name='delete_tag')
]
|
StarcoderdataPython
|
8151496
|
import argparse
from beautifultable import BeautifulTable
from stests.core import cache
from stests.core import factory
from stests.core.utils import args_validator
from stests.core.utils import cli as utils
from stests.core.utils import env
# CLI argument parser.
ARGS = argparse.ArgumentParser("List set of nodes registered with a network.")
# CLI argument: network name.
ARGS.add_argument(
"--net",
default=env.get_network_name(),
dest="network",
help="Network name {type}{id}, e.g. nctl1.",
type=args_validator.validate_network,
)
# Table columns.
COLS = [
("ID", BeautifulTable.ALIGN_LEFT),
("Host:Port", BeautifulTable.ALIGN_LEFT),
("Type", BeautifulTable.ALIGN_LEFT),
]
def main(args):
"""Entry point.
:param args: Parsed CLI arguments.
"""
# Pull data.
network_id=factory.create_network_id(args.network)
network = cache.infra.get_network(network_id)
if network is None:
utils.log_warning(f"Network {args.network} is unregistered.")
return
data = cache.infra.get_nodes(network_id)
if not data:
utils.log_warning(f"Network {args.network} has no nodes.")
return
# Set cols/rows.
cols = [i for i, _ in COLS]
rows = map(lambda i: [
i.label_index,
f"{i.host}:{i.port_rpc}",
i.typeof.name,
], sorted(data, key=lambda i: i.index))
# Set table.
t = utils.get_table(cols, rows)
# Set table alignments.
    for key, alignment in COLS:
        t.column_alignments[key] = alignment
# Render.
print(t)
print(f"{network_id.name} node count = {len(data)}")
# Entry point.
if __name__ == '__main__':
main(ARGS.parse_args())
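# Hypothetical invocation (the script name is illustrative; --net falls back to env.get_network_name()):
#   $ python list_network_nodes.py --net nctl1
# The rendered table uses the columns declared in COLS: ID, Host:Port, Type.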
|
StarcoderdataPython
|
6500490
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from contrail_api_cli.command import Command, Arg
from contrail_api_cli.resource import Resource
from contrail_api_cli.exceptions import ResourceNotFound
from ..utils import RouteTargetAction
class SetGlobalASN(Command):
description = "Set the global ASN to the API server"
asn = Arg(nargs='?',
help="Autonomous System Number (default: %(default)s)",
type=RouteTargetAction.asn_type,
default=64512)
def __call__(self, asn=None):
global_config = Resource('global-system-config',
fq_name='default-global-system-config',
check=True)
global_config['autonomous_system'] = asn
global_config.save()
class GetGlobalASN(Command):
description = "Get global ASN"
def __call__(self):
try:
global_config = Resource('global-system-config',
fq_name='default-global-system-config',
fetch=True)
if global_config.get('autonomous_system'):
return json.dumps({
"asn": global_config.get('autonomous_system')
})
except ResourceNotFound:
pass
return json.dumps([])
|
StarcoderdataPython
|
3364601
|
"""
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pickle as cPickle
import numpy as np
from scipy.sparse import csr_matrix
import random
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import re
import copy
def _get_term_features(actions, UNIGRAMS_LIST, BIGRAMS_LIST):
"""
Given the list of unigrams and bigrams
returns the BOW feature vectors
"""
unigrams, bigrams = set([]), set([])
f = {}
for action in actions:
unigrams = unigrams | set(action['unigrams'])
bigrams = bigrams | set([tuple(x) for x in action['bigrams']])
f.update(dict(map(lambda x: ("UNIGRAM_" + str(x), 1 if x in unigrams else 0), UNIGRAMS_LIST)))
f.update(dict(map(lambda x: ("BIGRAM_" + str(x), 1 if tuple(x) in bigrams else 0), BIGRAMS_LIST)))
return f
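# Minimal usage sketch (our addition; the data shapes below are inferred from the function body):
#   actions = [{'unigrams': ['good', 'faith'], 'bigrams': [['good', 'faith']]}]
#   feats = _get_term_features(actions,
#                              UNIGRAMS_LIST=['good', 'faith', 'revert'],
#                              BIGRAMS_LIST=[('good', 'faith')])
#   # feats maps "UNIGRAM_<term>" / "BIGRAM_<pair>" keys to 0/1 presence indicators.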
|
StarcoderdataPython
|
239174
|
<filename>ci_release_publisher/temporary_store_release.py
# -*- coding: utf-8 -*-
from enum import Enum, unique
from github import GithubObject
import logging
import re
from . import config
from . import enum
from . import env
from . import github
from . import travis
_tag_suffix = 'tmp'
def _tag_name(travis_branch, travis_build_number, travis_job_number):
return '{}-{}-{}-{}-{}'.format(config.tag_prefix, travis_branch, travis_build_number, travis_job_number, _tag_suffix)
def _break_tag_name(tag_name):
if not tag_name.startswith(config.tag_prefix) or not tag_name.endswith(_tag_suffix):
return None
tag_name = tag_name[len(config.tag_prefix):-len(_tag_suffix)]
m = re.match('^-(?P<branch>.*)-(?P<build_number>\d+)-(?P<job_number>\d+)-$', tag_name)
if not m:
return None
return {'branch': m.group('branch'), 'build_number': m.group('build_number'), 'job_number': m.group('job_number')}
def _tag_name_tmp(travis_branch, travis_build_number, travis_job_number):
return '{}{}'.format(config.tag_prefix_tmp, _tag_name(travis_branch, travis_build_number, travis_job_number))
def _break_tag_name_tmp(tag_name):
if not tag_name.startswith(config.tag_prefix_tmp):
return None
tag_name = tag_name[len(config.tag_prefix_tmp):]
return _break_tag_name(tag_name)
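# Round-trip sketch (our addition; 'ci' stands in for config.tag_prefix, 'x-' for config.tag_prefix_tmp):
#   _tag_name('master', '42', '3')         -> 'ci-master-42-3-tmp'
#   _break_tag_name('ci-master-42-3-tmp')  -> {'branch': 'master', 'build_number': '42', 'job_number': '3'}
#   _tag_name_tmp('master', '42', '3')     -> 'x-ci-master-42-3-tmp'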
def publish_args(parser):
parser.add_argument('--release-name', type=str, help='Release name text. If not specified a predefined text is used.')
parser.add_argument('--release-body', type=str, help='Release body text. If not specified a predefined text is used.')
def publish_with_args(args, releases, artifact_dir, github_api_url, travis_api_url):
publish(releases, artifact_dir, args.release_name, args.release_body, github_api_url)
def publish(releases, artifact_dir, release_name, release_body, github_api_url):
github_token = env.required('CIRP_GITHUB_ACCESS_TOKEN') if env.optional('CIRP_GITHUB_ACCESS_TOKEN') else env.required('GITHUB_ACCESS_TOKEN')
github_repo_slug = env.required('CIRP_GITHUB_REPO_SLUG') if env.optional('CIRP_GITHUB_REPO_SLUG') else env.required('TRAVIS_REPO_SLUG')
travis_branch = env.required('TRAVIS_BRANCH')
travis_commit = env.required('TRAVIS_COMMIT')
travis_build_number = env.required('TRAVIS_BUILD_NUMBER')
travis_job_number = env.required('TRAVIS_JOB_NUMBER').split('.')[1]
travis_job_id = env.required('TRAVIS_JOB_ID')
travis_job_web_url = env.required('TRAVIS_JOB_WEB_URL')
tag_name = _tag_name(travis_branch, travis_build_number, travis_job_number)
logging.info('* Creating a temporary store release with the tag name "{}".'.format(tag_name))
tag_name_tmp = _tag_name_tmp(travis_branch, travis_build_number, travis_job_number)
logging.info('Creating a release with the tag name "{}".'.format(tag_name_tmp))
release = github.github(github_token, github_api_url).get_repo(github_repo_slug).create_git_release(
tag=tag_name_tmp,
name=release_name if release_name else
'Temporary store release {}'
.format(tag_name),
message=release_body if release_body else
('Auto-generated temporary release containing build artifacts of [Travis-CI job #{}]({}).\n\n'
'This release was created by the CI Release Publisher script, which will automatically delete it in the current or following builds.\n\n'
'You should not manually delete this release, unless you don\'t use the CI Release Publisher script anymore.')
.format(travis_job_id, travis_job_web_url),
draft=True,
prerelease=True,
target_commitish=travis_commit if not env.optional('CIRP_GITHUB_REPO_SLUG') else GithubObject.NotSet)
github.upload_artifacts(artifact_dir, release)
logging.info('Changing the tag name from "{}" to "{}".'.format(tag_name_tmp, tag_name))
release.update_release(name=release.title, message=release.body, prerelease=release.prerelease, target_commitish=release.target_commitish, draft=release.draft, tag_name=tag_name)
@unique
class CleanupScope(Enum):
CURRENT_JOB = 1
CURRENT_BUILD = 2
PREVIOUS_FINISHED_BUILDS = 3
@unique
class CleanupRelease(Enum):
COMPLETE = 1
INCOMPLETE = 2
def cleanup_args(parser):
parser.add_argument('--scope', nargs='+', type=str, choices=enum.enum_to_arg_choices(CleanupScope), required=True, help="Scope to cleanup.")
parser.add_argument('--release', nargs='+', type=str, choices=enum.enum_to_arg_choices(CleanupRelease), required=True, help="Release to cleanup.")
parser.add_argument('--on-nonallowed-failure', default=False, action='store_true',
help='Cleanup only if the current build has a job that both has failed and doesn\'t have allow_failure set on it, '
'i.e. the current build is going to fail once the current stage finishes running.')
def cleanup_with_args(args, releases, github_api_url, travis_api_url):
cleanup(releases, enum.arg_choices_to_enum(CleanupScope, args.scope), enum.arg_choices_to_enum(CleanupRelease, args.release),
            args.on_nonallowed_failure, github_api_url, travis_api_url)
def cleanup(releases, scopes, release_completenesses, on_nonallowed_failure, github_api_url, travis_api_url):
github_token = env.required('CIRP_GITHUB_ACCESS_TOKEN') if env.optional('CIRP_GITHUB_ACCESS_TOKEN') else env.required('GITHUB_ACCESS_TOKEN')
github_repo_slug = env.required('CIRP_GITHUB_REPO_SLUG') if env.optional('CIRP_GITHUB_REPO_SLUG') else env.required('TRAVIS_REPO_SLUG')
travis_repo_slug = env.required('TRAVIS_REPO_SLUG')
travis_branch = env.required('TRAVIS_BRANCH')
travis_build_number = env.required('TRAVIS_BUILD_NUMBER')
travis_build_id = env.required('TRAVIS_BUILD_ID')
travis_job_number = env.required('TRAVIS_JOB_NUMBER').split('.')[1]
travis_test_result = env.optional('TRAVIS_TEST_RESULT')
travis_allow_failure = env.optional('TRAVIS_ALLOW_FAILURE')
travis_token = env.optional('CIRP_TRAVIS_ACCESS_TOKEN')
logging.info('* Deleting temporary store releases.')
if on_nonallowed_failure:
        # Jobs are marked as failed in the API only once they complete, so if we want to check whether the current job has failed,
        # which obviously hasn't completed yet since we are running, we have to check the env variables instead of
        # the Travis-CI API; the API won't tell us this.
has_nonallowed_failure = travis_test_result == '1' and travis_allow_failure == 'false'
if not has_nonallowed_failure:
# Alright, now check the API for other complete jobs
has_nonallowed_failure = travis.Travis(travis_api_url, travis_token, github_token).build_has_failed_nonallowfailure_job(travis_build_id)
if not has_nonallowed_failure:
return
branch_unfinished_build_numbers = []
if CleanupScope.PREVIOUS_FINISHED_BUILDS in scopes:
branch_unfinished_build_numbers = travis.Travis(travis_api_url, travis_token, github_token).branch_unfinished_build_numbers(travis_repo_slug, travis_branch)
def should_delete(r):
if not r.draft:
return False
info = None
if not info and CleanupRelease.COMPLETE in release_completenesses:
info = _break_tag_name(r.tag_name)
if not info and CleanupRelease.INCOMPLETE in release_completenesses:
info = _break_tag_name_tmp(r.tag_name)
if not info:
return False
if info['branch'] != travis_branch:
return False
result = False
if not result and CleanupScope.CURRENT_JOB in scopes:
result = int(info['build_number']) == int(travis_build_number) and int(info['job_number']) == int(travis_job_number)
if not result and CleanupScope.CURRENT_BUILD in scopes:
result = int(info['build_number']) == int(travis_build_number)
if not result and CleanupScope.PREVIOUS_FINISHED_BUILDS in scopes:
result = int(info['build_number']) < int(travis_build_number) and info['build_number'] not in branch_unfinished_build_numbers
return result
releases_to_delete = [r for r in releases if should_delete(r)]
# Sort for a better presentation when printing
releases_to_delete = sorted(releases_to_delete, key=lambda r: not not _break_tag_name(r.tag_name))
releases_to_delete = sorted(releases_to_delete, key=lambda r: int(_break_tag_name(r.tag_name)['job_number'] if _break_tag_name(r.tag_name)
else _break_tag_name_tmp(r.tag_name)['job_number']))
releases_to_delete = sorted(releases_to_delete, key=lambda r: int(_break_tag_name(r.tag_name)['build_number'] if _break_tag_name(r.tag_name)
else _break_tag_name_tmp(r.tag_name)['build_number']))
for release in releases_to_delete:
try:
github.delete_release_with_tag(release, github_token, github_api_url, github_repo_slug)
except Exception as e:
logging.warning('{}: {}'.format(type(e).__name__, e))
def download(releases, artifact_dir):
github_token = env.required('CIRP_GITHUB_ACCESS_TOKEN') if env.optional('CIRP_GITHUB_ACCESS_TOKEN') else env.required('GITHUB_ACCESS_TOKEN')
travis_branch = env.required('TRAVIS_BRANCH')
travis_build_number = env.required('TRAVIS_BUILD_NUMBER')
logging.info('* Downloading temporary store releases created during this build.')
    # FIXME(nurupo): once Python 3.8 is out, use Assignment Expression to prevent expensive _break_tag_name() calls https://www.python.org/dev/peps/pep-0572/
releases_stored = [r for r in releases if r.draft and
_break_tag_name(r.tag_name) and
_break_tag_name(r.tag_name)['branch'] == travis_branch and
int(_break_tag_name(r.tag_name)['build_number']) == int(travis_build_number)]
# Sort for a better presentation when printing
releases_stored = sorted(releases_stored, key=lambda r: int(_break_tag_name(r.tag_name)['job_number']))
if not releases_stored:
logging.info('Couldn\'t find any temporary store releases for this build.')
return
for release in releases_stored:
github.download_artifcats(github_token, release, artifact_dir)
|
StarcoderdataPython
|
5091871
|
<gh_stars>1-10
from rest_framework import viewsets, filters
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Detail, Order
from order.serializers import OrderStatusUpdateSerializer, \
OrderStatusRetrieveSerializer, OrderSerializer, \
DetailSerializer, OrderDetailRetrieveSerializer
class BaseOrderAttrViewSet(viewsets.ModelViewSet):
"""Base ViewSet for user owned order attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(order__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-id').distinct()
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
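    # Usage sketch (our addition; the URL path depends on how the project's router registers this viewset):
    #   GET /details/?assigned_only=1  -> only attributes already attached to an order, owned by the request user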
class DetailViewSet(BaseOrderAttrViewSet):
"""Manage pizza order in the database"""
queryset = Detail.objects.all()
serializer_class = DetailSerializer
class OrderViewSet(viewsets.ModelViewSet):
"""Manage orders in the database"""
serializer_class = OrderSerializer
queryset = Order.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
filter_backends = [filters.OrderingFilter]
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the orders for the authenticated user"""
detail = self.request.query_params.get('detail')
queryset = self.queryset
if detail:
detail_ids = self._params_to_ints(detail)
queryset = queryset.filter(detail__id__in=detail_ids)
return queryset.filter(user=self.request.user)
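    # Usage sketch (our addition; the URL path depends on the router registration):
    #   GET /orders/?detail=1,3  -> orders whose related details have IDs 1 or 3, owned by the request user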
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return OrderDetailRetrieveSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new order"""
serializer.save(user=self.request.user)
class OrderRetrieveUpdateStatusView(viewsets.ModelViewSet):
"""
View to get or update a specific order detail.
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = Order.objects.all()
def get_serializer_class(self):
method = self.request.method
serializer_class = OrderStatusRetrieveSerializer
if method == 'PUT':
serializer_class = OrderStatusUpdateSerializer
return serializer_class
|
StarcoderdataPython
|
5052204
|
import logging
from ...util import none_or
from ..errors import MalformedResponse
from .collection import Collection
logger = logging.getLogger("mw.api.collections.users")
class Users(Collection):
"""
A collection of information about users
"""
PROPERTIES = {'blockinfo', 'implicitgroups', 'groups', 'registration',
'emailable', 'editcount', 'gender'}
SHOW = {'minor', '!minor', 'patrolled', '!patrolled'}
MAX_REVISIONS = 50
def query(self, *args, **kwargs):
"""
Get a user's metadata.
See `<https://www.mediawiki.org/wiki/API:Users>`_
:Parameters:
users : str
The usernames of the users to be retrieved.
properties : set(str)
Include additional pieces of information
blockinfo - Tags if the user is blocked, by whom, and
for what reason
groups - Lists all the groups the user(s) belongs to
implicitgroups - Lists all the groups a user is automatically
a member of
rights - Lists all the rights the user(s) has
editcount - Adds the user's edit count
registration - Adds the user's registration timestamp
emailable - Tags if the user can and wants to receive
email through [[Special:Emailuser]]
gender - Tags the gender of the user. Returns "male",
"female", or "unknown"
"""
done = False
while not done:
us_docs, query_continue = self._query(*args, **kwargs)
for doc in us_docs:
yield doc
if query_continue is None or len(us_docs) == 0:
done = True
else:
kwargs['query_continue'] = query_continue
def _query(self, users, query_continue=None, properties=None):
params = {
'action': "query",
'list': "users"
}
params['ususers'] = self._items(users, type=str)
params['usprop'] = self._items(properties, levels=self.PROPERTIES)
if query_continue is not None:
params.update(query_continue)
doc = self.session.get(params)
try:
if 'query-continue' in doc:
query_continue = doc['query-continue']['users']
else:
query_continue = None
us_docs = doc['query']['users']
return us_docs, query_continue
except KeyError as e:
raise MalformedResponse(str(e), doc)
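# Usage sketch (our addition; the collection is normally reached through an api session object,
# attribute name assumed):
#   for doc in session.users.query(users=["ExampleUser"], properties={'editcount', 'registration'}):
#       print(doc)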
|
StarcoderdataPython
|
45011
|
<reponame>4dcu-be/WinstonCubeSim<filename>main.py<gh_stars>0
from cubedata import RichCubeData as CubeData
import click
@click.command()
@click.option("--url", is_flag=True)
@click.argument("path", required=True, type=str)
def run(path, url):
cube_data = CubeData(draft_size=90)
if url:
cube_data.read_cube_url(path)
else:
cube_data.read_cube_csv(path)
cube_data.start_game()
if __name__ == "__main__":
run()
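# Hypothetical invocations (the cube path/URL below are placeholders):
#   $ python main.py my_cube.csv
#   $ python main.py --url https://example.com/my_cube.csv
# With --url, PATH is treated as a URL and read via read_cube_url(); otherwise it is read as a local CSV file.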
|
StarcoderdataPython
|
8140612
|
<reponame>bio-hpc/metascreener
############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME> TSRI 201
#
#############################################################################
"""
Module implementing the commands that are present when instantiating
an AppFramework class or AppFramework derived class.
- loadModuleCommand
- UndoCommand
- RedoCommand
- BrowseCommandsCommand
"""
# $Header: /opt/cvs/AppFramework/notOptionalCommands.py,v 1.7 2014/07/18 00:07:04 annao Exp $
#
# $Id: notOptionalCommands.py,v 1.7 2014/07/18 00:07:04 annao Exp $
#
## FIXME these should become part of the AppFramework rather than commands
##
import os, sys
from string import join
from mglutil.util.packageFilePath import findFilePath, findModulesInPackage
from AppFramework.AppCommands import AppCommand
from mglutil.events import Event
class NewUndoEvent(Event):
pass
class AfterUndoEvent(Event):
pass
class AfterRedoEvent(Event):
pass
commandslist=[]
cmd_docslist={}
def findAllAppPackages():
"""Returns a list of package names found in sys.path"""
packages = {}
for p in ['.']+sys.path:
flagline = []
if not os.path.exists(p) or not os.path.isdir(p):
continue
files = os.listdir(p)
for f in files:
pdir = os.path.join(p, f)
if not os.path.isdir(pdir):
continue
if os.path.exists( os.path.join( pdir, '__init__.py')) :
fptr =open("%s/__init__.py" %pdir)
Lines = fptr.readlines()
flagline =filter(lambda x:x.startswith("packageContainsVFCommands"),Lines)
if not flagline ==[]:
if not packages.has_key(f):
packages[f] = pdir
return packages
class UndoCommand(AppCommand):
"""pops undo string from the stack and executes it in the AppFrameworks
scope
\nPackage : AppFramework
\nModule : notOptionalCommands.py
\nClass : UndoCommand
\nCommand : Undo
\nSynopsis:\n
None <- Undo()
"""
def validateUserPref(self, value):
try:
val = int(value)
if val >-1:
return 1
else:
return 0
except:
return 0
def onAddCmdToApp(self):
doc = """Number of commands that can be undone"""
self.app().userpref.add( 'Number of Undo', 100,
validateFunc=self.validateUserPref,
doc=doc)
def addUndoCall(self, cmdList, name):
#print self.name, "addUndoCall for:", name
# FIXME handle user pref
self.cmdStack.append( (cmdList, name) )
maxLen = self.app().userpref['Number of Undo']['value']
if maxLen>0 and len(self.cmdStack)>maxLen:
forget = self.cmdStack[:-maxLen]
self.cmdStack = self.cmdStack[-maxLen:]
for cmdList, name in forget:
for cmd, args, kw in cmdList:
if hasattr(cmd, "handleForgetUndo"):
cmd.handleForgetUndo(*args, **kw)
#the gui part of the application should register the following
# event listener that will update the label if necessary
event = NewUndoEvent(objects=self.cmdStack, command=self)
self.app().eventHandler.dispatchEvent(event)
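    # Usage sketch (our addition; how a command reaches this UndoCommand instance is application-specific):
    #   undoCmd.addUndoCall( [(someCmd, (arg1,), {'kw': 1})], 'undo someCmd' )
    # i.e. each entry pairs a callable with the positional and keyword arguments needed to revert the operation.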
def doit(self, **kw):
"""
pop cmdList from stack and execute each cmd in cmdlList
"""
stack = self.cmdStack
if stack:
cmdList, name = stack.pop()
ncmds = len(cmdList)
self._cmdList = ([], name) # this list will gather undoCommands generated during the undo
for i, (cmd, args, kw) in enumerate(cmdList):
self.inUndo = ncmds-i-1
if hasattr(cmd, 'name'):
name = cmd.name # this is a command
else:
#a method or a function
if hasattr(cmd, "im_class"):
name = "%s.%s" % (cmd.im_class, cmd.__name__)
else:
name = cmd.__name__
#msg = "Failed to run %s from %s"%(name, self.name)
cmd( *args, **kw)
#self.app().GUI.safeCall( cmd, msg, *args, **kw)
self._cmdList = () # this list will gather undoCommands generated during the undo
#self.inUndo = True
#for cmd, args, kw in cmdList:
# cmd( *args, **kw)
#self.inUndo = False
self.inUndo = -1
else:
self.app().warningMsg('ERROR: Undo called for %s when undo stack is empty'%\
self.name)
event = AfterUndoEvent(objects=self.cmdStack, command=self)
self.app().eventHandler.dispatchEvent(event)
def __init__(self):
AppCommand.__init__(self)
# cmdStack is a list of tuples providing 1-a list of commands to execute and 2 a name for this operation
# the list of commands is in the following format [ (cmd, *args, **kw) ]
self.cmdStack = []
self.inUndo = -1 # will be 0 or a positive integer while we are executing command(s) to undo last operation.
self._cmdList = () # this tuple will contain a list that will collect negation of commands during a loop over commands
# corresponding to an Undo (or Redo in subclassed command)
def checkArguments(self, **kw):
"""None<---NEWundo()
"""
kw['topCommand'] = 0
return (), kw
def resetCmdStack(self):
#remove all items from self.cmdStack
if len(self.cmdStack):
del(self.cmdStack)
self.cmdStack = []
event = AfterUndoEvent(objects=self.cmdStack, command=self)
self.app().eventHandler.dispatchEvent(event)
def cleanCmdStack(self, molecule):
        # when a molecule is deleted in a non-undoable way we need to
        # remove references to this molecule from the undo/redo cmd stack
        # go over the stored commands (command.cmdStack) and remove the ones that
        # contain the given molecule in their argument tuples.
removedEntries = [] # will contain indices of entries that need to be
# removed from command.cmdStack
#print "cleaning up %s cmdStack" % command.name
from MolKit.tree import TreeNode, TreeNodeSet
        # loop over undo/redo command stack
for i, cmdEntry in enumerate(self.cmdStack):
cmdList = cmdEntry[0] # get a handle to (cmd *args, **kw)
remove = False
# loop over commands in this undo block
for j, cmd in enumerate(cmdList):
if remove: break
for arg in cmd[1]: # FIXME this loop might not be necessary
# if the only place for molecular fragments
# is the first argument
#if the arg is a molecular fragment
if isinstance(arg, TreeNode):
if arg.top==molecule:
removedEntries.append(i)
remove = True
break
elif isinstance(arg, TreeNodeSet):
deleted = molecule.findType(arg.elementType)
new = arg - deleted
if len(new)==0:
removedEntries.append(i)
remove = True
break
#else:
# replace arg that contains reference to this molecule and some other
# molecule(s) by new.
#cmdList[j] = (cmdList[j][0], (new,), cmdList[j][2])
# FIXME: this is not sufficient , we need to find a way to
# check all kw of the command to see if they contain vectors of colors, etc
# for this molecule.
                        #for now we remove all cmdStack entries containing a reference to this molecule.
elif new == arg:
remove = False
break
else:
removedEntries.append(i)
remove = True
break
else: #not TreNodeSet , not TreNode
#FIX ME (ex: AddBondsCommand takes a list of atom pairs ....
# maybe there are more cases like this one)
# remove it - for now
removedEntries.append(i)
remove = True
break
# remove selected entries
n = 0
for i in removedEntries:
self.cmdStack.pop(i-n)
n = n+1
event = AfterUndoEvent(objects=self.cmdStack, command=self)
self.app().eventHandler.dispatchEvent(event)
class RedoCommand(UndoCommand):
"""pops redo cmdList from the stack and executes it in the AppFrameworks
scope
\nPackage : AppFramework
\nModule : notOptionalCommands.py
\nClass : RedoCommand
    \nCommand : Redo
    \nSynopsis:\n
        None <- Redo()
"""
pass
class BrowseCommandsCommand(AppCommand):
"""Command to load dynamically either modules or individual commands
in the Application.
\nPackage : AppFramework
\nModule : notOptionalCommands.py
\nClass : BrowseCommandsCommand
\nCommand : browseCommands
\nSynopsis:\n
None <-- browseCommands(module, commands=None, package=None, **kw)
    \nRequired Arguments:\n
        module --- name of the module(eg:colorCommands)
    \nOptional Arguments:\n
        commands --- one list of commands to load
    \npackage --- name of the package to which module belongs(eg:Pmv,Vision)
"""
def __init__(self):
AppCommand.__init__(self)
self.allPack = {}
self.packMod = {}
self.allPackFlag = False
self.txtGUI = ""
def doit(self, module, commands=None, package=None, removable=False, gui=False):
# if removable:
# self.app().removableCommands.settings[module] = [commands, package]
# self.app().removableCommands.saveAllSettings()
# If the package is not specified the default is the first library
#global commandslist,cmd_docslist
#import pdb
#pdb.set_trace()
if package is None: package = self.app().libraries[0]
importName = package + '.' + module
try:
# try to execute import Pmv.colorCommands
mod = __import__(importName, globals(), locals(),
[module])
except:
if self.cmdForms.has_key('loadCmds') and \
self.cmdForms['loadCmds'].f.winfo_toplevel().wm_state() == \
'normal':
self.app().errorMsg(sys.exc_info(),
"ERROR: Could not load module %s"%module,
obj=module )
elif self.app().loadModule.cmdForms.has_key('loadModule') and \
self.app().loadModule.cmdForms['loadModule'].f.winfo_toplevel().wm_state() == \
'normal':
self.app().errorMsg(sys.exc_info(),
"ERROR: Could not load module %s"%module,
obj=module)
else:
self.app().errorMsg(sys.exc_info(),
"ERROR: Could not load module %s"%module,
obj=module)
#import traceback
#traceback.print_exc()
if commands is None:
            # no particular command is asked for, so we try
# to run the initModule
if hasattr(mod,"initModule"):
mod.initModule(self.app(), gui=gui)
elif hasattr(mod, 'commandList'):
for d in mod.commandList:
cmd = d['cmd'].__class__()
self.app().addCommand( cmd, d['name'], None)
elif hasattr(mod, 'commandClassFromName'):
for name, values in mod.commandClassFromName.items():
cmd = values[0]()
self.app().addCommand( cmd, name, None)
else :
raise RuntimeError, "cannot load module %s, missing init"%importName
        else: # a single command or a list of commands was given
if isinstance(commands, str):
commands = [commands,]
            if hasattr(mod, 'commandList'):
for cmdName in commands:
found = False
for d in mod.commandList:
if d['name']==cmdName:
cmd = d['cmd'].__class__()
self.app().addCommand( cmd, d['name'], d['gui'])
found = True
break
                    if not found:
raise RuntimeError, 'ERROR: cmd %s not found in %s'%(cmdName, importName)
elif hasattr(mod, 'commandClassFromName'):
for cmdName in commands:
values = mod.commandClassFromName.get(cmdName, None)
if values:
cmd = values[0]()
                        # FIXME gui are instances, that means that 2 PMV would share
# these instances :(. Lazy loading fixes this since the GUI is
# created independently
gui = values[1]
self.app().addCommand( cmd, cmdName, gui)
else:
raise RuntimeError, 'ERROR: cmd %s not found in %s'%(cmdName, importName)
else :
raise RuntimeError, "cannot load module %s, missing init"%importName
def checkArguments(self, module, commands=None, package=None, **kw):
"""None<---browseCommands(module, commands=None, package=None, **kw)
\nmodule --- name of the module(eg:colorCommands)
        \ncommands --- one list of commands to load
\npackage --- name of the package to which module belongs(eg:Pmv,Vision)
"""
kw['commands'] = commands
kw['package'] = package
return (module,), kw
# the following code should go to the GUI part of the Command
## def buildFormDescr(self, formName):
## import Tkinter, Pmw
## from mglutil.gui.InputForm.Tk.gui import InputFormDescr
## from mglutil.gui.BasicWidgets.Tk.customizedWidgets import kbScrolledListBox
## if not formName == 'loadCmds': return
## idf = InputFormDescr(title='Load Modules and Commands')
## pname = self.app().libraries
## #when Pvv.startpvvCommnads is loaded some how Volume.Pvv is considered
## #as seperate package and is added to packages list in the widget
## #To avoid this packages having '.' are removed
## for p in pname:
## if '.' in p:
## ind = pname.index(p)
## del pname[ind]
## idf.append({'name':'packList',
## 'widgetType':kbScrolledListBox,
## 'wcfg':{'items':pname,
## #'defaultValue':pname[0],
## 'listbox_exportselection':0,
## 'labelpos':'nw',
## 'label_text':'Select a package:',
## #'dblclickcommand':self.loadMod_cb,
## 'selectioncommand':self.displayMod_cb
## },
## 'gridcfg':{'sticky':'wesn'}})
## idf.append({'name':'modList',
## 'widgetType':kbScrolledListBox,
## 'wcfg':{'items':[],
## 'listbox_exportselection':0,
## 'labelpos':'nw',
## 'label_text':'Select a module:',
## #'dblclickcommand':self.loadMod_cb,
## 'selectioncommand':self.displayCmds_cb,
## },
## 'gridcfg':{'sticky':'wesn', 'row':-1}})
## idf.append({'name':'cmdList',
## 'widgetType':kbScrolledListBox,
## 'wcfg':{'items':[],
## 'listbox_exportselection':0,
## 'listbox_selectmode':'extended',
## 'labelpos':'nw',
## 'label_text':'Available commands:',
## #'dblclickcommand':self.loadCmd_cb,
## 'selectioncommand':self.displayCmd_cb,
## },
## 'gridcfg':{'sticky':'wesn', 'row':-1}})
## # idf.append({'name':'docbutton',
## # 'widgetType':Tkinter.Checkbutton,
## # #'parent':'DOCGROUP',
## # 'defaultValue':0,
## # 'wcfg':{'text':'Show documentation',
## # 'onvalue':1,
## # 'offvalue':0,
## # 'command':self.showdoc_cb,
## # 'variable':Tkinter.IntVar()},
## # 'gridcfg':{'sticky':'nw','columnspan':3}})
## idf.append({'name':'DOCGROUP',
## 'widgetType':Pmw.Group,
## 'container':{'DOCGROUP':"w.interior()"},
## 'collapsedsize':0,
## 'wcfg':{'tag_text':'Description'},
## 'gridcfg':{'sticky':'wnse', 'columnspan':3}})
## idf.append({'name':'doclist',
## 'widgetType':kbScrolledListBox,
## 'parent':'DOCGROUP',
## 'wcfg':{'items':[],
## 'listbox_exportselection':0,
## 'listbox_selectmode':'extended',
## },
## 'gridcfg':{'sticky':'wesn', 'columnspan':3}})
## idf.append({'name':'allPacks',
## 'widgetType':Tkinter.Button,
## 'wcfg':{'text':'Show all packages',
## 'command':self.allPacks_cb},
## 'gridcfg':{'sticky':'ew'}})
## idf.append({'name':'loadMod',
## 'widgetType':Tkinter.Button,
## 'wcfg':{'text':'Load selected module',
## 'command':self.loadMod_cb},
## 'gridcfg':{'sticky':'ew', 'row':-1}})
## # idf.append({'name':'loadCmd',
## # 'widgetType':Tkinter.Button,
## # 'wcfg':{'text':'Load Command',
## # 'command':self.loadCmd_cb},
## # 'gridcfg':{'sticky':'ew', 'row':-1}})
## idf.append({'name':'dismiss',
## 'widgetType':Tkinter.Button,
## 'wcfg':{'text':'Dismiss',
## 'command':self.dismiss_cb},
## 'gridcfg':{'sticky':'ew', 'row':-1}})
## # idf.append({'name':'dismiss',
## # 'widgetType':Tkinter.Button,
## # 'wcfg':{'text':'DISMISS',
## # 'command':self.dismiss_cb,
## # },
## # 'gridcfg':{'sticky':Tkinter.E+Tkinter.W,'columnspan':3}})
## return idf
## def guiCallback(self):
## self.app().GUI.ROOT.config(cursor='watch')
## self.app().GUI.ROOT.update()
## if self.allPack == {}:
## self.allPack = findAllVFPackages()
## val = self.showForm('loadCmds', force=1,modal=0,blocking=0)
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## # docb=ebn['docbutton']['widget']
## # var=ebn['docbutton']['wcfg']['variable'].get()
## # if var==0:
## # dg=ebn['DOCGROUP']['widget']
## # dg.collapse()
## self.app().GUI.ROOT.config(cursor='')
## def dismiss_cb(self, event=None):
## self.cmdForms['loadCmds'].withdraw()
## def allPacks_cb(self, event=None):
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## packW = ebn['packList']['widget']
## if not self.allPackFlag:
## packName = self.allPack.keys()
## packW.setlist(packName)
## ebn['allPacks']['widget'].configure(text='Show default packages')
## self.allPackFlag = True
## else:
## packName = self.app().libraries
## packW.setlist(packName)
## ebn['allPacks']['widget'].configure(text='Show all packages')
## self.allPackFlag = False
## ebn['modList']['widget'].clear()
## ebn['cmdList']['widget'].clear()
## def displayMod_cb(self, event=None):
## #print "displayMod_cb"
## # c = self.cmdForms['loadCmds'].mf.cget('cursor')
## # self.cmdForms['loadCmds'].mf.configure(cursor='watch')
## # self.cmdForms['loadCmds'].mf.update_idletasks()
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## # docb=ebn['docbutton']['widget']
## # var=ebn['docbutton']['wcfg']['variable'].get()
## # dg = ebn['DOCGROUP']['widget']
## # dg.collapse()
## packW = ebn['packList']['widget']
## packs = packW.getcurselection()
## if len(packs) == 0:
## return
## packName = packs[0]
## if not self.packMod.has_key(packName):
## package = self.allPack[packName]
## self.packMod[packName] = findModulesInPackage(package,"^def initModule",fileNameFilters=['Command'])
## self.currentPack = packName
## modNames = []
## for key, value in self.packMod[packName].items():
## pathPack = key.split(os.path.sep)
## if pathPack[-1] == packName:
## newModName = map(lambda x: x[:-3], value)
## #for mname in newModName:
## #if "Command" not in mname :
## #ind = newModName.index(mname)
## #del newModName[ind]
## modNames = modNames+newModName
## else:
## pIndex = pathPack.index(packName)
## prefix = join(pathPack[pIndex+1:], '.')
## newModName = map(lambda x: "%s.%s"%(prefix, x[:-3]), value)
## #for mname in newModName:
## #if "Command" not in mname :
## #ind = newModName.index(mname)
## #del newModName[ind]
## modNames = modNames+newModName
## modNames.sort()
## modW = ebn['modList']['widget']
## modW.setlist(modNames)
## # and clear contents in self.libraryGUI
## cmdW = ebn['cmdList']['widget']
## cmdW.clear()
## m = __import__(packName, globals(), locals(),[])
## d = []
## docstring=m.__doc__
## #d.append(m.__doc__)
## docw = ebn['doclist']['widget']
## docw.clear()
## #formatting documentation.
## if docstring!=None :
## if '\n' in docstring:
## x = docstring.split("\n")
## for i in x:
## if i !='':
## d.append(i)
## if len(d)>8:
## docw.configure(listbox_height=8)
## else:
## docw.configure(listbox_height=len(d))
## else:
## x = docstring.split(" ")
## #formatting documenation
## if len(x)>10:
## docw.configure(listbox_height=len(x)/10)
## else:
## docw.configure(listbox_height=1)
## docw.setlist(d)
## # self.cmdForms['loadCmds'].mf.configure(cursor=c)
## #when show documentation on after selcting a package
## #dg is expanded to show documenttation
## #if var==1 and docw.size()>0:
## ##if docw.size()>0:
## ## dg.expand()
## def displayCmds_cb(self, event=None):
## #print "displayCmds_cb"
## global cmd_docslist
## self.cmdForms['loadCmds'].mf.update_idletasks()
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## dg = ebn['DOCGROUP']['widget']
## dg.collapse()
## cmdW = ebn['cmdList']['widget']
## cmdW.clear()
## # docb=ebn['docbutton']['widget']
## # var=ebn['docbutton']['wcfg']['variable'].get()
## modName = ebn['modList']['widget'].getcurselection()
## if modName == (0 or ()): return
## else:
## modName = modName[0]
## importName = self.currentPack + '.' + modName
## try:
## m = __import__(importName, globals(), locals(),['commandList'])
## except:
## return
## if not hasattr(m, 'commandList'):
## return
## cmdNames = map(lambda x: x['name'], m.commandList)
## cmdNames.sort()
## if modName:
## self.var=1
## d =[]
## docstring =m.__doc__
## docw = ebn['doclist']['widget']
## docw.clear()
## if docstring!=None :
## if '\n' in docstring:
## x = docstring.split("\n")
## for i in x:
## if i !='':
## d.append(i)
## #formatting documenation
## if len(d)>8:
## docw.configure(listbox_height=8)
## else:
## docw.configure(listbox_height=len(d))
## else:
## d.append(docstring)
## x = docstring.split(" ")
## #formatting documenation
## if len(x)>10:
## docw.configure(listbox_height=len(x)/10)
## else:
## docw.configure(listbox_height=1)
## docw.setlist(d)
## CmdName=ebn['cmdList']['widget'].getcurselection()
## cmdW.setlist(cmdNames)
## #when show documentation is on after selcting a module or a command
## #dg is expanded to show documenttation
## #if var==1 and docw.size()>0:
## if docw.size()>0:
## dg.expand()
## def displayCmd_cb(self, event=None):
## #print "displayCmd_cb"
## global cmd_docslist
## self.cmdForms['loadCmds'].mf.update_idletasks()
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## dg = ebn['DOCGROUP']['widget']
## dg.collapse()
## # docb=ebn['docbutton']['widget']
## # var=ebn['docbutton']['wcfg']['variable'].get()
## modName = ebn['modList']['widget'].getcurselection()
## if modName == (0 or ()): return
## else:
## modName = modName[0]
## importName = self.currentPack + '.' + modName
## try:
## m = __import__(importName, globals(), locals(),['commandList'])
## except:
## self.warningMsg("ERROR: Cannot find commands for %s"%modName)
## return
## if not hasattr(m, 'commandList'):
## return
## cmdNames = map(lambda x: x['name'], m.commandList)
## cmdNames.sort()
## if modName:
## self.var=1
## d =[]
## docstring =m.__doc__
## import string
## docw = ebn['doclist']['widget']
## docw.clear()
## if docstring!=None :
## if '\n' in docstring:
## x = docstring.split("\n")
## for i in x:
## if i !='':
## d.append(i)
## #formatting documenation
## if len(d)>8:
## docw.configure(listbox_height=8)
## else:
## docw.configure(listbox_height=len(d))
## else:
## d.append(docstring)
## x = docstring.split(" ")
## #formatting documenation
## if len(x)>10:
## docw.configure(listbox_height=len(x)/10)
## else:
## docw.configure(listbox_height=1)
## docw.setlist(d)
## cmdW = ebn['cmdList']['widget']
## CmdName=ebn['cmdList']['widget'].getcurselection()
## cmdW.setlist(cmdNames)
## if len(CmdName)!=0:
## for i in m.commandList:
## if i['name']==CmdName[0]:
## c = i['cmd']
## if CmdName[0] in cmdNames:
## ind= cmdNames.index(CmdName[0])
## cmdW.selection_clear()
## cmdW.selection_set(ind)
## d =[]
## docstring=c.__doc__
## docw = ebn['doclist']['widget']
## docw.clear()
## if CmdName[0] not in cmd_docslist.keys():
## cmd_docslist[CmdName[0]]=d
## import string
## if docstring!=None :
## if '\n' in docstring:
## x = docstring.split("\n")
## for i in x:
## if i !='':
## d.append(i)
## if len(d)>8:
## docw.configure(listbox_height=8)
## else:
## docw.configure(listbox_height=len(d))
## else:
## d.append(docstring)
## x = docstring.split(" ")
## if len(x)>10:
## docw.configure(listbox_height=len(x)/10)
## else:
## docw.configure(listbox_height=1)
## docw.setlist(d)
## #when show documentation is on after selcting a module or a command
## #dg is expanded to show documenttation
## #if var==1 and docw.size()>0:
## if docw.size()>0:
## dg.expand()
## def loadMod_cb(self, event=None):
## ebn = self.cmdForms['loadCmds'].descr.entryByName
## selMod = ebn['modList']['widget'].getcurselection()
## if len(selMod)==0: return
## else:
## self.txtGUI = ""
## apply(self.doitWrapper, ( selMod[0],),
## {'commands':None, 'package':self.currentPack, 'removable':True})
## self.dismiss_cb(None)
## if self.txtGUI:
## self.txtGUI = "\n Access this command via:\n"+self.txtGUI
## import tkMessageBox
## tkMessageBox.showinfo("Load Module", selMod[0]+" loaded successfully!\n"+self.txtGUI)
class loadModuleCommand(AppCommand):
"""Command to load dynamically modules to the App. import the file called name.py and execute the function initModule defined in that file Raises a ValueError exception if initModule is not defined
\nPackage : AppFramework
\nModule : notOptionalCommands.py
\nClass : loadModuleCommand
\nCommand : loadModule
\nSynopsis:\n
None<--loadModule(filename, package=None, **kw)
    \nRequired Arguments:\n
filename --- name of the module
\nOptional Arguments:\n
package --- name of the package to which filename belongs
"""
active = 0
def doit(self, filename, package):
        # This is NOT called because we call browseCommand()
if package is None:
_package = filename
else:
_package = "%s.%s"%(package, filename)
try:
mod = __import__( _package, globals(), locals(), ['initModule'])
            if hasattr(mod, 'initModule') and callable(mod.initModule):
mod.initModule(self.app())
else:
self.app().errorMsg(sys.exc_info(), '%s:Module %s does not have initModule function'%(self.name, filename))
except:
self.app().errorMsg(sys.exc_info(), '%s:Module %s could not be imported'%(self.name, _package))
def checkArguments(self, filename, package=None, **kw):
"""None<---loadModule(filename, package=None, **kw)
        \nRequired Arguments:\n
filename --- name of the module
        \nOptional Arguments:\n
package --- name of the package to which filename belongs
"""
if package==None:
package=self.app().libraries[0]
if not kw.has_key('redraw'):
kw['redraw'] = 0
kw['package'] = package
return (filename,), kw
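    # Usage sketch (not from the original source; 'someCommands' is a hypothetical module name):
    # following the synopsis in the class docstring, a caller would invoke the command roughly as
    #     app.loadModule('someCommands', package='AppFramework')
    # checkArguments() then fills in the default package and forces redraw=0 before dispatching.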
def loadModules(self, package, library=None):
modNames = []
doc = []
self.filenames={}
self.allPack={}
self.allPack=findAllVFPackages()
if package is None: return [], []
if not self.filenames.has_key(package):
pack=self.allPack[package]
#finding modules in a package
self.filenames[pack] =findModulesInPackage(pack,"^def initModule",fileNameFilters=['Command'])
# dictionary of files keys=widget, values = filename
for key, value in self.filenames[pack].items():
pathPack = key.split(os.path.sep)
if pathPack[-1] == package:
newModName = map(lambda x: x[:-3], value)
#for mname in newModName:
                #if the module name does not contain 'Command', delete it from the
                #modules list
#if "Command" not in mname :
#ind = newModName.index(mname)
#del newModName[ind]
#if "Command" in mname :
if hasattr(newModName,"__doc__"):
doc.append(newModName.__doc__)
else:
doc.append(None)
modNames = modNames + newModName
else:
pIndex = pathPack.index(package)
                prefix = '.'.join(pathPack[pIndex+1:])
newModName = map(lambda x: "%s.%s"%(prefix, x[:-3]), value)
#for mname in newModName:
                #if the module name does not contain 'Command', delete it from the
                #modules list
#if "Command" not in mname :
#ind = newModName.index(mname)
#del newModName[ind]
if hasattr(newModName,"__doc__"):
doc.append(newModName.__doc__)
else:
doc.append(None)
modNames = modNames + newModName
modNames.sort()
return modNames, doc
# The following code should go to the GUI part of the command
## def loadModule_cb(self, event=None):
## ebn = self.cmdForms['loadModule'].descr.entryByName
## moduleName = ebn['Module List']['widget'].get()
## package = ebn['package']['widget'].get()
## if moduleName:
## self.app().browseCommands(moduleName[0], package=package, redraw=0)
## def package_cb(self, event=None):
## ebn = self.cmdForms['loadModule'].descr.entryByName
## pack = ebn['package']['widget'].get()
## names, docs = self.loadModules(pack)
## w = ebn['Module List']['widget']
## w.clear()
## for n,d in map(None, names, docs):
## w.insert('end', n, d)
## def buildFormDescr(self, formName):
## """create the cascade menu for selecting modules to be loaded"""
## if not formName == 'loadModule':return
## import Tkinter, Pmw
## from mglutil.gui.BasicWidgets.Tk.customizedWidgets import ListChooser
## ifd = InputFormDescr(title='Load command Modules')
## names, docs = self.loadModules(self.app().libraries[0])
## entries = map(lambda x: (x, None), names)
## pname=self.app().libraries
## for p in pname:
## if '.' in p:
## ind = pname.index(p)
## del pname[ind]
## ifd.append({
## 'name':'package',
## 'widgetType': Pmw.ComboBox,
## 'defaultValue': pname[0],
## 'wcfg':{ 'labelpos':'nw', 'label_text':'Package:',
## 'selectioncommand': self.package_cb,
## 'scrolledlist_items':pname
## },
## 'gridcfg':{'sticky':'ew', 'padx':2, 'pady':1}
## })
## ifd.append({'name': 'Module List',
## 'widgetType':ListChooser,
## 'wcfg':{
## 'title':'Choose a module',
## 'entries': entries,
## 'lbwcfg':{'width':27,'height':10},
## 'command':self.loadModule_cb,
## 'commandEvent':"<Double-Button-1>"
## },
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W}
## })
## ifd.append({'name': 'Load Module',
## 'widgetType':Tkinter.Button,
## 'wcfg':{'text':'Load Module',
## 'command': self.loadModule_cb,
## 'bd':6},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W},
## })
## ifd.append({'widgetType':Tkinter.Button,
## 'wcfg':{'text':'Dismiss',
## 'command': self.Dismiss_cb},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W}})
## return ifd
## def Dismiss_cb(self):
## self.cmdForms['loadModule'].withdraw()
## self.active = 0
## def guiCallback(self, event=None):
## if self.active: return
## self.active = 1
## form = self.showForm('loadModule', force=1,modal=0,blocking=0)
## form.root.protocol('WM_DELETE_WINDOW',self.Dismiss_cb)
|
StarcoderdataPython
|
5132867
|
<filename>GetWeiboCookies/SelemiumCaptcha.py
import base64
import datetime
import json
import os
import random
import re
import requests
from pymongo.errors import DuplicateKeyError
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fake_useragent import UserAgent
import sys
import time
sys.path.append(os.getcwd())
from CAPTCHA import weibo
class weibo():
def __init__(self, username, password,proxy):
self.username = username
self.password = password
self.url = 'https://gongyi.sina.com.cn/'
self.wburl = 'https://weibo.com/'
self.codeurl = 'https://api.weibo.com/chat/#/chat'
opt = webdriver.ChromeOptions()
        opt.add_argument("--proxy-server=http://%s" % proxy)
self.browser = webdriver.Chrome(
executable_path='C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe',
chrome_options=opt)
self.wait = WebDriverWait(self.browser, 20)
self.browser.maximize_window()
self.browser.get(self.url)
def run(self):
self.CAPTCHA()
while not self.isLogin():
self.CAPTCHA()
            print('Login failed')
time.sleep(random.uniform(2, 2.5))
        self.browser.get(self.wburl)  # already logged in to weibo.com
time.sleep(random.uniform(2, 3))
self.browser.get(self.codeurl)
time.sleep(random.uniform(5, 7))
return_code = self.return_unicode()
return return_code
def CAPTCHA(self):
time.sleep(random.uniform(2, 2.5))
self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[2]/div/a').click()
time.sleep(random.uniform(0.5, 1.2))
username = self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[2]/input')
password = self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[3]/input')
username.send_keys(self.username)
password.send_keys(self.password)
time.sleep(random.uniform(2, 2.5))
try:
word = self.get_code()
code = self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[4]/input')
code.send_keys(word)
time.sleep(random.uniform(2, 2.5))
self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[6]/span/a').click()
time.sleep(random.uniform(3, 3.5))
try:
x = str(self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/p').text)
while len(x):
word = self.get_code()
code.send_keys(word)
time.sleep(random.uniform(2, 2.5))
self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[6]/span/a').click()
time.sleep(random.uniform(5, 5.5))
x = str(self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/p').text)
except:
print('Done')
pass
except:
self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[6]/span/a').click()
time.sleep(random.uniform(5, 5.5))
def isLogin(self):
try:
ActionChains(self.browser).move_to_element(
self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[2]/div/a')).perform()
            print('Login succeeded')
return True
except:
return False
def get_code(self):
nodes = self.browser.find_element_by_xpath('//*[@id="SI_User"]/div[3]/div[2]/ul/li[4]/img')
nodes.screenshot('test.png')
return self.get_ai_words(uname='RA1LGUN', pwd='<PASSWORD>', img='test.png')
def get_ai_words(self,uname, pwd, img):
with open(img, 'rb') as f:
base64_data = base64.b64encode(f.read())
b64 = base64_data.decode()
data = {"username": uname, "password": <PASSWORD>, "image": b64}
result = json.loads(requests.post("http://api.ttshitu.com/base64", json=data).text)
if result['success']:
return result["data"]["result"]
else:
return result["message"]
return ""
def return_unicode(self):
self.browser.get(self.codeurl)
time.sleep(random.uniform(5, 5.5))
items = self.browser.find_element_by_xpath('//*[@id="app"]/div/div/div[1]/div[3]/div/div[1]/div/div/ul')
li = items.find_elements_by_tag_name("li")
for i in range(len(li)):
try:
if re.findall('微博安全中心', li[i].text):
print('ok')
time.sleep(random.uniform(4, 5.5))
li[i].click()
time.sleep(random.uniform(4, 5.5))
infos = self.browser.find_element_by_xpath('//*[@id="drag-area"]/div/div[1]/div[2]/div[2]/div[1]/div/ul')
oneinfo = infos.find_elements_by_tag_name("li")
try:
for y in range(1,5):
LoginCode = re.findall('验证码:([0-9]{6})', oneinfo[-y].text)
print(LoginCode)
if LoginCode:
return LoginCode
except:
print('Read Done')
return ''
except:
print('None Centre')
return ''
def return_code(self):
print('return_code')
self.browser.get(self.codeurl)
time.sleep(random.uniform(5, 5.5))
try:
items = self.browser.find_element_by_xpath('//*[@id="app"]/div/div/div[1]/div[3]/div/div[1]/div/div/ul')
LoginCode = re.findall('验证码:([0-9]{6})', items.text)
if LoginCode:
return LoginCode[0]
else:
return ''
except:
return ''
def closebrowser(self):
self.browser.quit()
if __name__ == '__main__':
username = '16563377754'
password = '<PASSWORD>'
proxy_url = requests.get("http://127.0.0.1:5010/get/").json().get("proxy")
x = weibo(username,password,proxy_url)
code = x.run()
time.sleep(random.uniform(2, 4.5))
x.closebrowser()
print(code)
|
StarcoderdataPython
|
5133743
|
<gh_stars>1-10
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
System: Ubuntu 18.04
IDE: VS Code 1.36
Tools: python == 3.7.3
"""
"""
Approach:
    Same idea as problem 268: store the numbers in a set, then check each candidate from 1 to n.
Result:
    Runtime: 500 ms, beating 23.87% of Python3 submissions
    Memory: 23.8 MB, beating 5.06% of Python3 submissions
"""
class Solution:
def findDisappearedNumbers(self, nums):
new_nums = set(nums)
result = []
for i in range(1,len(nums)+1):
if i not in new_nums:
result.append(i)
return result
if __name__ == "__main__":
nums = [1,1]
answer = Solution().findDisappearedNumbers(nums)
print(answer)
|
StarcoderdataPython
|
6661814
|
<gh_stars>0
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import JsonResponse
from django.utils.translation import gettext as _
from django.core.exceptions import ObjectDoesNotExist
from django.utils.datastructures import MultiValueDictKeyError
from .forms import FrmProducts
from .models import Products
from users.models import Users
dummy=_('The product already exists')
dummy=_('Please enter a valid product')
def get_products_created_by_user(request):
try:
products = Products.objects.filter(created_by_user=request.user, dropped=False)
return len(products)
except ObjectDoesNotExist:
msg = _('We can not retrieve the number of products created by this user')
        msg += '. ' + _('Retry, and if the problem persists get in touch with the system administrator and report') + ': '
msg += 'error at products.views@get_products_created_by_user'
context = {'level': 'error', 'msg': msg}
return render(request, 'dashboard/index.html', context=context)
def add(request):
context = {}
itm_menu = request.GET.get('itm_menu', 'lnk1')
context['itm_menu'] = itm_menu
url='products/add.html'
if request.method == 'GET':
frm = FrmProducts(title=_('Add product'), action='/products/do-add', btn_label=_('Save'), icon_btn_submit='save')
app_version = request.GET['app_version']
context['form'] = frm
context['app_version'] = app_version
counter_products=get_products_created_by_user(request)
limit=100
if app_version==_('Free version'):
if counter_products>=limit:
url='products/add-free-version-limited.html'
elif _('Basic version') in app_version:
if counter_products>=limit:
url='products/add-basic-version-limited.html'
elif _('Pro version') in app_version:
limit=1000
if counter_products>=limit:
url='products/add-pro-version-limited.html'
elif _('Advanced version') in app_version:
limit=10000
if counter_products>=limit:
url='products/add-advanced-version-limited.html'
return render(request, url, context=context)
def find(request):
context = {}
if request.method == 'GET':
itm_menu = request.GET.get('itm_menu', '')
if get_products_created_by_user(request) < 1:
return render(request, 'products/user-have-no-products-created.html', context={'itm_menu': itm_menu})
frm = FrmProducts(title=_('Find product'), action='/products/do-find', btn_label=_('Find'), icon_btn_submit='search')
app_version = request.GET['app_version']
context['form'] = frm
context['itm_menu'] = itm_menu
return render(request, 'products/find.html', context=context)
def do_add(request):
frm = FrmProducts(title=_('Add product'), action='/products/do-add', btn_label=_('Save'), icon_btn_submit='save')
context = {}
context['form'] = frm
context['msg'] = _('The product can not be saved')
context['level'] = 'error'
if request.method == 'POST':
try:
app_version = request.POST['app_version']
itm_menu = request.POST.get('itm_menu', '')
context['itm_menu'] = itm_menu
'''
if app_version == _('Free version'):
if get_products_created_by_user(request) > 0:
return render(request, 'products/add-free-version-limited.html', context={'itm_menu': itm_menu})
'''
context['app_version'] = app_version
# Retrieve the user who is creating the product
#user = User.objects.get(email=request.POST.get('user', None))
user = User.objects.get(username=request.POST.get('user', None))
my_user = Users.objects.get(pk=user.id)
product_name = request.POST.get('name', None)
# Check if the products does not exist
# (same product = product_name)
try:
user = Users.objects.get(pk=request.user)
#objs = Products.objects.filter(name=product_name, created_by_user=user)
'''
if len(objs) > 0:
context['msg'] = _('The product already exist')
context['level'] = 'error'
return render(request, 'products/add.html', context=context)
'''
obj = Products.objects.get(name=product_name, created_by_user=user)
if not obj.dropped:
                    context['msg'] = _('The product already exists')
context['level'] = 'error'
else:
obj.undrop()
context['msg'] = _('The product has been successfully saved')
context['level'] = 'success'
return render(request, 'products/add.html', context=context)
except ObjectDoesNotExist:
pass
obj = Products(name=product_name, \
created_by_user=my_user)
obj.save()
context['msg'] = _('The product has been successfully saved')
context['level'] = 'success'
except MultiValueDictKeyError:
return redirect('/')
except ObjectDoesNotExist:
return redirect('/')
return render(request, 'products/add.html', context=context)
def __generic_find_view__(request, can_delete=False, can_edit=False, view_all=False):
frm = FrmProducts(title=_('Find product'), action='/products/do-find', btn_label=_('Find'), icon_btn_submit='search')
context = {}
    context['msg'] = _('We can not find any product matching your query options')
context['level'] = 'error'
products = Products.objects.none()
itm_menu = request.POST.get('itm_menu', request.GET.get('itm_menu', ''))
context['itm_menu'] = itm_menu
#context['url_view_all'] = '/products/list-all/'
if request.method == 'POST':
try:
app_version = request.POST.get('app_version', _('Free version'))
#itm_menu = request.POST.get('itm_menu', '')
#context['itm_menu'] = itm_menu
context['app_version'] = app_version
search_by = {
'name__icontains': False
}
product_name = request.POST.get('name', None)
if product_name and product_name is not None and len(product_name.strip()) > 0:
search_by['name__icontains'] = product_name.strip()
# Retrieve the user logged in
#user = User.objects.get(email=request.GET.get('user', ''))
#my_user = Users.objects.get(pk=user.id)
user = Users.objects.get(pk=request.user)
query = Q(created_by_user=user) & Q(dropped=False)
final_search_by = {}
for criteria in search_by:
if search_by[criteria]:
final_search_by[criteria] = search_by[criteria]
# Build the query...
# See https://stackoverflow.com/questions/38131563/django-filter-with-or-condition-using-dict-argument
# for more details
from functools import reduce
import operator
query &= reduce(operator.or_, (Q(**d) for d in [dict([i]) for i in final_search_by.items()]))
products = Products.objects.filter(query)
#context['msg'] = _('We found {0} result(s) matching your query').format(len(products))
#context['level'] = "success"
except MultiValueDictKeyError:
return redirect('/')
except ObjectDoesNotExist:
return redirect('/')
elif view_all:
app_version = request.GET.get('app_version', _('Free version'))
itm_menu = request.GET.get('itm_menu', 'lnk1')
context['itm_menu'] = itm_menu
context['app_version'] = app_version
# Retrieve the user logged in
#user = User.objects.get(email=request.GET.get('user', ''))
user = User.objects.get(username=request.GET.get('user', ''))
my_user = Users.objects.get(pk=user.id)
query = Q(created_by_user=my_user) & Q(dropped=False)
products = Products.objects.filter(query)
if len(products) > 0:
context['products'] = products
context['show_modal'] = True
context['modal_name'] = 'dlgSearchResults'
context['can_delete'] = can_delete
context['can_edit'] = can_edit
context.pop('msg', None)
context.pop('level', None)
if can_edit:
frm = FrmProducts(title=_('Edit product'), action='/products/do-edit', btn_label=_('Find'), icon_btn_submit='search')
elif can_delete:
frm = FrmProducts(title=_('Delete product'), action='/products/do-delete', btn_label=_('Find'), icon_btn_submit='search')
context['form'] = frm
return render(request, 'products/find.html', context=context)
def do_find(request):
return __generic_find_view__(request)
def do_view_all(request):
if request.method == 'GET':
edit = request.GET.get('edit', False)
delete = request.GET.get('delete', False)
return __generic_find_view__(request, view_all=True, can_edit=edit, can_delete=delete)
return __generic_find_view__(request, view_all=True, can_edit=False, can_delete=False)
def edit(request):
context = {}
if request.method == 'GET':
itm_menu = request.GET.get('itm_menu', '')
if get_products_created_by_user(request) < 1:
return render(request, 'products/user-have-no-products-created.html', context={'itm_menu': itm_menu})
frm = FrmProducts(title=_('Edit product'), action='/products/do-edit', btn_label=_('Find'), icon_btn_submit='search')
app_version = request.GET['app_version']
context['form'] = frm
context['itm_menu'] = itm_menu
return render(request, 'products/find.html', context=context)
def do_edit(request):
return __generic_find_view__(request, can_edit=True)
def delete(request):
context = {}
if request.method == 'GET':
itm_menu = request.GET.get('itm_menu', '')
if get_products_created_by_user(request) < 1:
return render(request, 'products/user-have-no-products-created.html', context={'itm_menu': itm_menu})
frm = FrmProducts(title=_('Delete product'), action='/products/do-delete', btn_label=_('Find'), icon_btn_submit='search')
app_version = request.GET['app_version']
context['form'] = frm
context['itm_menu'] = itm_menu
return render(request, 'products/find.html', context=context)
def do_delete(request):
return __generic_find_view__(request, can_delete=True)
def view_details(request):
if request.method == 'GET':
try:
product = request.GET.get('obj', None)
product = Products.objects.get(pk=product)
can_edit = request.GET.get('can_edit', False)
can_delete = request.GET.get('can_delete', False)
itm_menu = request.GET.get('itm_menu', 'lnk1')
context = {
'product': product, 'can_edit': can_edit,
'can_delete': can_delete, 'itm_menu': itm_menu
}
return render(request, 'products/view-details.html', context=context)
except ObjectDoesNotExist:
return redirect('/')
return redirect('/')
def confirm_delete(request):
if request.method == 'GET':
try:
product = request.GET.get('product', None)
product = Products.objects.get(pk=product)
context = {
'product': product,
'can_delete': True
}
return render(request, 'products/view-details.html', context=context)
except ObjectDoesNotExist:
return redirect('/')
return redirect('/')
def delete_product(request):
if request.method == 'POST':
try:
product = request.POST.get('product', None)
product = Products.objects.get(pk=product)
reason = request.POST.get('reason', None)
            if reason is None or len(reason.strip()) < 1:
                reason = None
itm_menu = request.POST.get('itm_menu', 'lnk1')
product.drop(reason=reason)
'''
from datetime import datetime
full_time = datetime.now()
product.dropped = True
product.dropped_at = full_time
product.dropped_when = full_time
product.dropped_reason = reason
product.save()
'''
frm = FrmProducts(title=_('Delete product'), action='/products/do-delete', btn_label=_('Find'), icon_btn_submit='search')
#app_version = request.GET['app_version']
#context['form'] = frm
#context['itm_menu'] = itm_menu
context = {
'level': 'success',
'msg': _('The product has been deleted successfully'),
'itm_menu': itm_menu,
'form': frm
}
return render(request, 'products/find.html', context=context)
#return find(request)
#return redirect('/products/find')
except ObjectDoesNotExist:
return redirect('/')
return redirect('/')
def update(request):
if request.method == 'POST':
try:
product = request.POST.get('product', None)
product = Products.objects.get(pk=product)
name = request.POST.get('productname', None)
product.name = name
product.save()
msg = _('The product has been updated successfully')
return JsonResponse({'status': 'success', 'msg': msg})
except ObjectDoesNotExist:
msg = _('The product you are trying to update does not exist')
return JsonResponse({'status': 'error', 'msg': msg})
msg = _('You do not have permission to perform this request')
return JsonResponse({'status': 'error', 'msg': msg})
|
StarcoderdataPython
|
221929
|
<reponame>Dodoliko/XX4
from lib import MemAccess
from lib import offsets
from lib.MemAccess import *
def isValid(addr):
return ((addr >= 0x10000) and (addr < 0x0000001000000000))
def isValidInGame(addr):
return ((addr >= 0x140000000) and (addr < 0x14FFFFFFF))
def numOfZeros(value):
tmp = value
    ret = 0
for i in range(8):
if (((tmp >> (i * 8)) & 0xFF) == 0x00):
ret += 1
return ret
class PointerManager():
badobfus = 0
def __init__(self, pHandle):
self.mem = MemAccess(pHandle)
self.pHandle = pHandle
self.gpumemptr = 0
self.OBFUS_MGR = 0
if offsets.OBFUS_MGR == 0:
offsets.OBFUS_MGR = self.GetObfuscationMgr()
else:
self.OBFUS_MGR = offsets.OBFUS_MGR
@staticmethod
def decrypt_ptr(encptr, key):
# Grab byte at location
def GRAB_BYTE(x, n):
return (x >> (n * 8)) & 0xFF
ret = 0
subkey = (key ^ ((5 * key) % (2 ** 64))) % (2 ** 64)
for i in range(7):
y = GRAB_BYTE(subkey, i)
subkey += 8
t1 = (y * 0x3B) % (2 ** 8)
t2 = (y + GRAB_BYTE(encptr, i)) % (2 ** 8)
ret |= (t2 ^ t1) << (i * 8)
ret |= GRAB_BYTE(encptr, 7) << 56
ret &= 0x7FFFFFFFFFFFFFFF
return ret
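    # Sketch of intended use (added; enc_ptr and per_type_key are hypothetical values):
    #     ptr = PointerManager.decrypt_ptr(enc_ptr, per_type_key)
    #     if isValid(ptr): ...
    # The low seven bytes are decoded byte-by-byte against a rolling subkey, and the
    # top bit is masked off, so the result can be range-checked with isValid().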
def GetObfuscationMgr(self):
api._cache_en = False
print("[+] Searching for ObfuscationMgr...")
addr = -1
OM = 0
ss = StackAccess(self.pHandle, self.mem[offsets.PROTECTED_THREAD].read_uint32(0))
while (1):
addr = -1
time.sleep(0.01)
buf = ss.read()
for i in range(0, len(buf), 8):
testptr = int.from_bytes(buf[i:i + 8], "little")
if (isValid(testptr)):
if self.mem[testptr].read_uint64(0x0) == offsets.OBFUS_MGR_PTR_1:
OM = testptr
self.OBFUS_MGR = testptr
break
if (OM > 0): break
ss.close()
print("[+] Found ObfuscationMgr @ 0x%08x " % (OM))
api._cache_en = True
return OM
def GetDx11Secret(self):
def TestDx11Secret(self, testkey):
mem = self.mem
typeinfo = offsets.ClientStaticModelEntity
flink = mem[typeinfo].read_uint64(0x88)
ObfManager = self.OBFUS_MGR
HashTableKey = mem[typeinfo](0).me() ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
return 0
EncryptionKey ^= testkey
ptr = PointerManager.decrypt_ptr(flink, EncryptionKey)
if (isValid(ptr)):
return True
else:
return False
api._cache_en = False
if (TestDx11Secret(self, offsets.Dx11Secret)):
api._cache_en = True
return offsets.Dx11Secret
if offsets.GPUMemPtr:
for offset in range(0, 0x400, 0x100):
testptr = self.mem[offsets.GPUMemPtr].read_uint64(offset)
if (testptr):
if (TestDx11Secret(self, testptr)):
if (testptr != offsets.Dx11Secret):
print("[+] Found Dx11 key scraping GPU mem @ 0x%x" % (offsets.GPUMemPtr + offset))
offsets.Dx11Secret = testptr
api._cache_en = True
return offsets.Dx11Secret
offsets.GPUMemPtr = 0
ss = StackAccess(self.pHandle, self.mem[offsets.PROTECTED_THREAD].read_uint32(0))
if (self.mem[self.OBFUS_MGR].read_uint64(0x100) != 0):
addr = -1
OM = 0
i = 0
print("[+] Locating initial Dx11 key location, please wait...", flush=True)
while (1):
addr = -1
buf = ss.read()
addr = buf.find((offsets.OBFUS_MGR_RET_1).to_bytes(8, byteorder='little'))
while (addr > -1):
i = 0x38
gpumem = int.from_bytes(buf[addr + i:addr + i + 8], "little")
testptr = self.mem[gpumem].read_uint64(0x0)
if (TestDx11Secret(self, testptr)):
if (testptr != offsets.Dx11Secret):
offsets.GPUMemPtr = gpumem & 0xFFFFFFFFFFFFFC00
print("[+] Found Initial Dx11 key scraping GPU mem @ 0x%x" % (offsets.GPUMemPtr),
flush=True)
offsets.Dx11Secret = testptr
api._cache_en = True
ss.close()
return offsets.Dx11Secret
addr = buf.find((offsets.OBFUS_MGR_RET_1).to_bytes(8, byteorder='little'), addr + 8)
else:
offsets.Dx11Secret = 0
api._cache_en = True
ss.close()
return 0
def CheckCryptMode(self):
api._cache_en = False
DecFunc = self.mem[self.OBFUS_MGR].read_uint64(0xE0) ^ self.mem[self.OBFUS_MGR].read_uint64(0xF8)
Dx11EncBuffer = self.mem[self.OBFUS_MGR].read_uint64(0x100)
if ((Dx11EncBuffer != 0) and (offsets.Dx11EncBuffer != Dx11EncBuffer)):
self.GetDx11Secret()
print("[+] Dynamic key loaded, root key set to 0x%x" % (offsets.Dx11Secret))
offsets.Dx11EncBuffer = Dx11EncBuffer
offsets.CryptMode = 1
elif (offsets.CryptMode == 0):
if ((DecFunc == offsets.OBFUS_MGR_DEC_FUNC) and (Dx11EncBuffer != 0)):
self.GetDx11Secret()
print("[+] Dynamic key loaded, retrieving key...")
offsets.Dx11EncBuffer = Dx11EncBuffer
offsets.CryptMode = 1
elif (offsets.CryptMode == 1):
if (DecFunc != offsets.OBFUS_MGR_DEC_FUNC):
offsets.Dx11Secret = 0x598447EFD7A36912
print("[+] Static key loaded, root key set to 0x%x" % (offsets.Dx11Secret))
offsets.CryptMode = 0
self.gpumemptr = 0
api._cache_en = True
def hashtable_find(self, table, key):
mem = self.mem
bucketCount = mem[table].read_uint32(0x10)
if (bucketCount == 0):
#print("bucket zero")
return 0
elemCount = mem[table].read_uint32(0x14)
startcount = key % bucketCount
node = mem[table](0x8)(0x8 * startcount).me()
if (node == 0):
#print ("node zero")
return 0
while 1:
first = mem[node].read_uint64(0x0)
second = mem[node].read_uint64(0x8)
#next = mem[node].read_uint64(0x16)
next = mem[node].read_uint64(0x10)
if first == key:
#print ("Key: 0x%016x Node: 0x%016x"%(key^ mem[self.OBFUS_MGR].read_uint64(0xE0),node))
return second
elif (next == 0):
#print("next 0 for node 0x%16x" % node)
return 0
node = next
if node > 0x1000000000000000:
#something wrong?
#print ("Bad obfus for node 0x%016x", node)
offsets.badobfus = self.OBFUS_MGR
self.OBFUS_MGR = 0
offsets.OBFUS_MGR = 0
print("badobfus: 0x%x" % offsets.badobfus)
return 0
def GetLocalPlayer(self):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets.CLIENT_GAME_CONTEXT](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
LocalPlayerListXorValue = mem[ClientPlayerManager].read_uint64(0xF8)
LocalPlayerListKey = LocalPlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, LocalPlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount != 1):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets.Dx11Secret
LocalPlayer = mem[XorValue2].read_uint64(0) ^ XorValue1
return LocalPlayer
def GetPlayerById(self, id):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets.CLIENT_GAME_CONTEXT](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
PlayerListXorValue = mem[ClientPlayerManager].read_uint64(0x100)
PlayerListKey = PlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, PlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount != 70):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets.Dx11Secret
ClientPlayer = mem[XorValue2].read_uint64(0x8 * id) ^ XorValue1
return ClientPlayer
def GetSpectatorById(self, id):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets.CLIENT_GAME_CONTEXT](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
PlayerListXorValue = mem[ClientPlayerManager].read_uint64(0xF0)
PlayerListKey = PlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, PlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount == 0) or (id >= MaxPlayerCount):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets.Dx11Secret
ClientPlayer = mem[XorValue2].read_uint64(0x8 * id) ^ XorValue1
return ClientPlayer
def GetEntityKey(self, PointerKey):
self.CheckCryptMode()
mem = self.mem
ObfManager = self.OBFUS_MGR
HashTableKey = PointerKey ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
print ("encryptionkey = 0")
return 0
EncryptionKey ^= offsets.Dx11Secret
return EncryptionKey
def DecryptPointer(self, EncPtr, PointerKey):
self.CheckCryptMode()
if not (EncPtr & 0x8000000000000000):
return 0
mem = self.mem
ObfManager = self.OBFUS_MGR
HashTableKey = PointerKey ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager + 0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
return 0
EncryptionKey ^= offsets.Dx11Secret
return PointerManager.decrypt_ptr(EncPtr, EncryptionKey)
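# Usage sketch (added; pHandle, enc_ptr and pointer_key are hypothetical values):
#     pm = PointerManager(pHandle)
#     local_player = pm.GetLocalPlayer()
#     ptr = pm.DecryptPointer(enc_ptr, pointer_key)   # returns 0 when the pointer is not encrypted or the key cannot be resolved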
|
StarcoderdataPython
|
58931
|
import numpy as np
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
from sharppy.sharptab.constants import *
## Written by <NAME> - OU School of Meteorology
## and <NAME> - CIMMS
__all__ = ['backgroundWatch', 'plotWatch']
class backgroundWatch(QtGui.QFrame):
'''
Draw the background frame and lines for the watch plot frame
'''
def __init__(self):
super(backgroundWatch, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
        ## such as padding, width, height, and
## min/max plot axes
self.lpad = 0; self.rpad = 0
self.tpad = 0; self.bpad = 20
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
if self.physicalDpiX() > 75:
fsize = 10
else:
fsize = 12
self.title_font = QtGui.QFont('Helvetica', fsize)
self.plot_font = QtGui.QFont('Helvetica', fsize)
self.title_metrics = QtGui.QFontMetrics( self.title_font )
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.title_height = self.title_metrics.height()
self.plot_height = self.plot_metrics.height()
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized
'''
self.initUI()
def draw_frame(self, qp):
'''
Draw the background frame.
qp: QtGui.QPainter object
'''
## set a new pen to draw with
pen = QtGui.QPen(QtCore.Qt.white, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.title_font)
## draw the borders in white
qp.drawLine(self.tlx, self.tly, self.brx, self.tly)
qp.drawLine(self.brx, self.tly, self.brx, self.bry)
qp.drawLine(self.brx, self.bry, self.tlx, self.bry)
qp.drawLine(self.tlx, self.bry, self.tlx, self.tly)
y1 = self.bry / 13.
pad = self.bry / 100.
rect0 = QtCore.QRect(0, pad*4, self.brx, self.title_height)
qp.drawText(rect0, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, 'Psbl Haz. Type')
pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(0, pad*4 + (self.title_height + 3), self.brx, pad*4 + (self.title_height + 3))
def plotBackground(self):
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
#qp.setRenderHint(qp.Antialiasing)
#qp.setRenderHint(qp.TextAntialiasing)
## draw the frame
self.draw_frame(qp)
qp.end()
class plotWatch(backgroundWatch):
'''
Plot the data on the frame. Inherits the background class that
plots the frame.
'''
def __init__(self):
super(plotWatch, self).__init__()
self.prof = None
def setProf(self, prof):
self.prof = prof
self.watch_type = self.prof.watch_type
self.watch_type_color = self.prof.watch_type_color
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def resizeEvent(self, e):
'''
Handles when the window is resized
'''
super(plotWatch, self).resizeEvent(e)
self.plotData()
def paintEvent(self, e):
'''
Handles painting on the frame
'''
## this function handles painting the plot
super(plotWatch, self).paintEvent(e)
        ## create a new painter object
qp = QtGui.QPainter()
qp.begin(self)
        ## draw the cached pixmap, then end the painter
        qp.drawPixmap(0,0,self.plotBitMap)
        qp.end()
def clearData(self):
'''
Handles the clearing of the pixmap
in the frame.
'''
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.plotBitMap.fill(QtCore.Qt.black)
def plotData(self):
if self.prof is None:
return
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
pen = QtGui.QPen(QtGui.QColor(self.watch_type_color), 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.plot_font)
centery = self.bry / 2.
rect0 = QtCore.QRect(0, centery, self.brx, self.title_height)
qp.drawText(rect0, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, self.watch_type)
qp.end()
|
StarcoderdataPython
|
3397982
|
<reponame>ebezzam/snips-workshop-macos
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
import pyowm
import io
INTENT_HOW_ARE_YOU = "bezzam:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
GET_TEMPERATURE = "bezzam:get_temperature"
API_KEY = 'api_key'
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
owm = pyowm.OWM(API_KEY)
with Hermes("localhost:1883") as h:
h.owm = owm
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.subscribe_intent(GET_TEMPERATURE, get_temperature_callback) \
.start()
def get_temperature_callback(hermes, intent_message):
session_id = intent_message.session_id
city = intent_message.slots.city.first().value
try:
observation = hermes.owm.weather_at_place(city)
w = observation.get_weather()
temp = w.get_temperature('celsius')["temp"]
response = "It's {} degrees in {}.".format(temp, city)
except:
response = "You asked for the temperature in {}.".format(city)
hermes.publish_end_session(session_id, response)
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8137496
|
<gh_stars>1-10
from datetime import datetime
import subprocess
import requests
import json
import re
COMMAND = ['fail2ban-client', 'status', 'ufw-port-scan']
API_URL = 'https://api.github.com/gists/'
GIST_ID = '000'
USER_NAME = '000'
PAT = '<PASSWORD>'
LOG_FILE_NAME = '000'
now = datetime.now()
now_str = str(now.year) + '-' + str(now.month) + '-' + str(now.day) + ' ' + str(now.hour) + ':' + str(now.minute)
# Execute command and process output
proc = subprocess.Popen(COMMAND, stdout=subprocess.PIPE)
lines = proc.stdout.read().decode().split('\n')
bannums = []
for l in lines:
if not re.search(r'Total', l) is None:
bannums.append(l.split('\t')[1])
log_line = now_str + ',' + str(bannums[0]) + ',' + str(bannums[1]) + '\n'
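# Example of the resulting CSV line (hypothetical counts): "2021-3-7 14:5,12,3\n"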
# write_path = sys.argv[1]
# with open(write_path, 'a') as f:
# f.write(log_line)
# Get old content
gist_url = API_URL + GIST_ID
response = json.loads(requests.get(gist_url).text)
# Post new content
updated_content = response['files'][LOG_FILE_NAME]['content'] + log_line
data_to_post = {'files': {LOG_FILE_NAME: {'content': updated_content}}}
patch_response = requests.patch(gist_url, data=json.dumps(data_to_post), auth=(USER_NAME, PAT))
print(patch_response.text)
|
StarcoderdataPython
|
3201968
|
<reponame>chrisbahnsen/aau-rainsnow-eval
import json
import os
import copy
from Evaluate import cocoTools
sourceJsonPath = './rainSnowGt.json'
destDir = ''
with open('./splitSequenceTranslator.json') as f:
splitSequenceTranslator = json.load(f)
# List rain removal methods here
methods = ['baseline',
'Fu2017',
'GargNayar/Median',
'GargNayar/STCorr',
'IDCGAN',
'Kang2012',
'Kim2015-blur']
with open(sourceJsonPath, 'r') as f:
sourceGt = json.load(f)
for method in methods:
methodGt = copy.deepcopy(sourceGt)
removedImageIds = dict()
if 'images' in methodGt:
images = []
imageList = methodGt['images']
for image in imageList:
methodImage = image
imageNumber = image['file_name'].split('-')[-1]
number = int(imageNumber.replace('.png',''))
if number >= 40 and number <= 5990:
scene = image['file_name'].split('/')[0]
sequence = image['file_name'].split('/')[1]
if 'baseline' not in method:
newMethodPath = os.path.join(destDir,
scene,
sequence,
method,
imageNumber)
methodImage['file_name'] = newMethodPath
images.append(methodImage)
else:
                removedImageIds[image['id']] = image['id']  # Doesn't really matter what the entry is; we're only interested in the key
if 'annotations' in methodGt:
annotations = []
for annotation in methodGt['annotations']:
if annotation['image_id'] not in removedImageIds:
annotations.append(annotation)
else:
print("Removed annotation " + str(annotation['id']) + ' for image ' + str(annotation['image_id']))
methodGt['images'] = images
methodGt['annotations'] = annotations
# Also make sure to remove the annotations at the removed image ID's
outputPath = os.path.join(destDir, method.replace('/', '-') + '.json')
with open(os.path.join(destDir, method.replace('/', '-') + '.json'), 'w') as f:
json.dump(methodGt, f)
cocoTools.removeInstancesInsideDontCare('P:/Private/Traffic safety/Data/RainSnow/PixelLevelAnnotationsOriginal',
outputPath,
splitSequenceTranslator,
destDir)
|
StarcoderdataPython
|
9630054
|
from django import forms
from django.core import validators
class SignUpForm(forms.Form):
firstName = forms.CharField(min_length=3, max_length=20, widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': 'John'}))
lastName = forms.CharField(min_length=3, max_length=20, label='Last Name: ', widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': 'Doe'}))
email = forms.EmailField(validators=[validators.MaxLengthValidator(50)], label='Email: ', widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': '<EMAIL>'}))
password = forms.CharField(min_length=8, max_length=32, widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
repassword = forms.CharField(min_length=8, max_length=32, label="Confirm Password", widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
def clean(self):
        cleaned_data = super(SignUpForm, self).clean()
valPass = cleaned_data.get('password')
valRePass = cleaned_data.get('repassword')
if valPass != valRePass:
self.add_error('password','Passwords did not match.')
class LoginForm(forms.Form):
email = forms.EmailField(max_length=50, label='Email: ', widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': '<EMAIL>'}))
password = forms.CharField(min_length=8, max_length=32, widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
class DateInput(forms.DateInput):
input_type = 'date'
class profileForm(forms.Form):
email = forms.EmailField(validators=[validators.MaxLengthValidator(50)], label='Email: ', widget=forms.TextInput(attrs={'class': 'form-control form-input col-md-3', 'placeholder': '<EMAIL>'}))
alternateEmail = forms.EmailField(required=False, validators=[validators.MaxLengthValidator(50)], label='Alternate Email: ', widget=forms.TextInput(attrs={'class': 'form-control form-input', 'placeholder': '<EMAIL>'}))
mobile = forms.IntegerField(required=False, validators=[validators.MaxLengthValidator(10), validators.MinLengthValidator(10)],min_value=6000000000, max_value=9999999999,widget=forms.TextInput(attrs={'class': 'form-control form-input', 'placeholder': '9012345678'}))
firstName = forms.CharField(label='First Name: ',min_length=3, max_length=20, widget=forms.TextInput(attrs={'class': 'form-control form-input', 'placeholder': 'John'}))
#middleName = forms.CharField(min_length=3, max_length=20, label='Middle Name: ', required=False, widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': 'Ivy'}))
lastName = forms.CharField(min_length=3, max_length=20, label='Last Name: ', widget=forms.TextInput(attrs={'class': 'form-control form-input col-md-3', 'placeholder': 'Doe'}))
dob = forms.DateField(required=False,label='Date of Birth: ', widget=DateInput(attrs={'class': 'form-control form-input'}))
CHOICES = [(0,'Male'),(1,'Female'),(2,'Other')]
gender= forms.IntegerField(required=False, label='Gender: ', widget=forms.RadioSelect(choices=CHOICES, attrs={'class': 'custom-control custom-radio custom-control-inline'}))
profilePic = forms.ImageField(required=False, label='Upload Profile Picture')
class resetPasswordForm(forms.Form):
oldPassword = forms.CharField(label='Old Password: ',min_length=8, max_length=32, widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
newPassword = forms.CharField(label='New Password: ', min_length=8, max_length=32, widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
newRePassword = forms.CharField(label='Confirm Password: ', min_length=8, max_length=32, widget=forms.PasswordInput(attrs={'class':'form-control form-input'}))
def clean(self):
cleaned_data = super(resetPasswordForm, self).clean()
valPass = cleaned_data.get('newPassword')
valRePass = cleaned_data.get('newRePassword')
if valPass != valRePass:
self.add_error('newPassword','Passwords did not match.')
class forgotPasswordForm(forms.Form):
email = forms.EmailField(max_length=50, label='Email: ', widget=forms.TextInput(attrs={'class':'form-control form-input', 'placeholder': '<EMAIL>'}))
|
StarcoderdataPython
|
183845
|
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
fengfan
<EMAIL>
Date:
2017/1/10
Description:
Sunshine RPC Module
History:
2017/1/10, create file.
----------------------------------------------------------------------------"""
import sys
import uuid
is_py2 = sys.version[0] == '2'
if is_py2:
from Queue import Queue, Empty
else:
from queue import Queue, Empty
import msgpack
import zmq
import threading
import time
CMD_CREATE_SERVER = 1
CMD_CREATE_CLIENT = 2
CMD_SERVER_QUIT = 3
CMD_GET_SERVER_NODE = 4
_pack = lambda dat: msgpack.packb(dat, use_bin_type=True)
_unpack = lambda dat: msgpack.unpackb(dat, encoding="utf-8")
_zmq_context = None
_zmq_poll = None
_zmq_cmd_sock = None
_cmd_thread = None
_socket_thread = None
_project = None
_err_handler = None
class SSRPC_ERR(object):
    CONNECT_FAILED = 1  # connection failed
def _err_msg(code, msg):
if _err_handler:
_err_handler(code, msg)
else:
# import traceback
# traceback.print_stack()
print("SSRPC errno:%d msg:%s"%(code, msg))
def _hex_str(tstr):
return ":".join(hex(x if isinstance(x, int) else ord(x))[2:] for x in tstr)
_cmd_queue = Queue()
def cmd_loop(cmd_q, cmd_sock):
while True:
cmd = cmd_q.get()
        if isinstance(cmd, int):  # quit signal
break
cmd_sock.send(_pack(cmd[0]))
msg = cmd_sock.recv()
msg = _unpack(msg)
if cmd[1]:
cmd[1](msg)
# break out here
cmd_sock.close()
_HEARTBEAT_SEC = 5
_HEARTBEAT_DATA = b'_ss_inner_heartbeat'
hb_time = 0
_sock_list = {}
def socket_loop(poller):
hb_time = time.time() + _HEARTBEAT_SEC
while True:
socks = dict(poller.poll(_HEARTBEAT_SEC/2*1000))
for s in socks:
handler = _sock_list.get(s)
if not handler:
print(s, "has no handler???")
continue
if handler.is_server:
                msgs = s.recv_multipart()  # only two frames: [router-assigned client id, data]; multi-frame payloads are not allowed for now
handler._on_multi_data(msgs)
else:
msg = s.recv()
handler._on_raw_data(msg)
        if hb_time < time.time():  # send heartbeat packets
hb_time = time.time() + _HEARTBEAT_SEC
for handler in _sock_list.values():
handler.send_heartbeat()
def socket_tick():
poller = _zmq_poll
socks = dict(poller.poll(_HEARTBEAT_SEC / 2))
for s in socks:
handler = _sock_list.get(s)
if not handler:
print(s, "has no handler???")
continue
if handler.is_server:
            msgs = s.recv_multipart()  # only two frames: [router-assigned client id, data]; multi-frame payloads are not allowed for now
handler._on_multi_data(msgs)
else:
msg = s.recv()
handler._on_raw_data(msg)
global hb_time
    if hb_time < time.time():  # send heartbeat packets
hb_time = time.time() + _HEARTBEAT_SEC
for handler in _sock_list.values():
handler.send_heartbeat()
def cmd_tick():
cmd_q = _cmd_queue
cmd_sock = _zmq_cmd_sock
try:
cmd = cmd_q.get(False)
except Empty:
return
    if isinstance(cmd, int):  # quit signal
return
cmd_sock.send(_pack(cmd[0]))
msg = cmd_sock.recv()
msg = _unpack(msg)
if cmd[1]:
cmd[1](msg)
_mode = 0
MAIN_THREAD_TICK = 0x1
MAIN_THREAD_SOCKET = 0x10
MAIN_THREAD_EVERYTHING = MAIN_THREAD_TICK | MAIN_THREAD_SOCKET
_callback_queue = Queue()
def init(projectID, mode=0, err_handler=None, addr="tcp://172.16.31.10:5560"):
"""
    Initialize the SSRPC module.
    :param projectID: your project code, e.g. G68
    :param mode: startup mode; currently supported flags:
        MAIN_THREAD_TICK: the host calls tick() periodically to process callbacks; otherwise callbacks run automatically on the socket thread
        MAIN_THREAD_SOCKET: the host drives the socket tick itself; otherwise the socket loop runs in a worker thread
    :param err_handler: error handler with the signature err_handler(code, msg)
    :param addr: broker address; normally the default can be left unchanged
"""
global _zmq_context
global _zmq_poll
global _zmq_cmd_sock
global _cmd_thread
global _socket_thread
global _project
global _mode
global _err_handler
assert projectID
_project = projectID
_mode = mode
_err_handler = err_handler
_zmq_context = zmq.Context()
_zmq_cmd_sock = _zmq_context.socket(zmq.REQ)
_zmq_cmd_sock.connect(addr)
_zmq_poll = zmq.Poller()
if not mode & MAIN_THREAD_SOCKET:
_cmd_thread = threading.Thread(target=cmd_loop, args=(_cmd_queue, _zmq_cmd_sock))
_cmd_thread.daemon = True
_cmd_thread.start()
_socket_thread = threading.Thread(target=socket_loop, args=(_zmq_poll,))
_socket_thread.daemon = True
# _socket_thread.start()
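# Typical bootstrap (sketch only; "G68" and MyServerHandler are placeholder names):
#     init("G68", mode=MAIN_THREAD_EVERYTHING)
#     setup_server("my_server_id", MyServerHandler())
#     while running: tick()   # in this mode the host loop drives callbacks and the socket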
if not is_py2:
def _convert_bytes_to_str(data):
if isinstance(data, bytes):
return data.decode()
elif isinstance(data, (tuple, list)):
return [_convert_bytes_to_str(x) for x in data]
elif isinstance(data, dict):
new_dict = {}
for k, v in data.items():
new_dict[_convert_bytes_to_str(k)] = _convert_bytes_to_str(v)
return new_dict
else:
return data
def _send_cmd(data, cb=None):
_cmd_queue.put([data, cb])
def _setup_cb(handler, new_addr):
global _sock_list
if not new_addr:
_err_msg(SSRPC_ERR.CONNECT_FAILED, "try connect to %s failed" % handler._to_server_id)
return
new_sock = _zmq_context.socket(zmq.DEALER)
new_sock.connect(new_addr)
_zmq_poll.register(new_sock, zmq.POLLIN)
if not(_mode & MAIN_THREAD_SOCKET) and (not _socket_thread.is_alive()):
_socket_thread.start()
_sock_list[new_sock] = handler
# print(handler)
handler.set_zmq_socket(new_sock)
handler.on_init_finish(True)
print("create", "server" if handler.is_server else "client", "done ->", new_addr)
if not handler.is_server:
try:
import Atmosphere
Atmosphere.EventParasiteConnected()
except Exception as e:
print(e)
SERV_MODE_ALL_GET_MSG = 0x1
def setup_server(server_ids, handler, mode=0):
if not isinstance(server_ids, (tuple, list)):
server_ids = [server_ids]
data = {
"server_ids": server_ids,
"cmd": CMD_CREATE_SERVER,
"proj": _project,
"mode": mode,
}
handler._my_id = server_ids[0]
_send_cmd(data, lambda msg: _setup_cb(handler, msg["rtn"]))
def setup_client(server_id, handler):
handler._to_server_id = server_id
import uuid
handler._my_id = uuid.uuid4()
data = {
"server_id":server_id,
"cmd":CMD_CREATE_CLIENT,
"proj":_project,
}
_send_cmd(data, lambda msg: _setup_cb(handler, msg["rtn"]))
def get_server_node_info(project_id, callback):
data = {
"projectID": project_id,
"cmd": CMD_GET_SERVER_NODE,
}
_send_cmd(data, lambda msg: callback(project_id, msg["rtn"]))
def _recv_node_cb(project_id, rtn):
print("broker servers(project id: %s):" % project_id)
for serv, ports in rtn.items():
print("server id: %s, node ports: %s" % (serv, ports))
def tick():
if _mode & MAIN_THREAD_TICK:
while _callback_queue.qsize() > 0:
cb = _callback_queue.get()
cb[0]._exec_func(cb[1], cb[2])
if _mode & MAIN_THREAD_SOCKET:
socket_tick()
cmd_tick()
def stop():
pass
# global _zmq_poll
# global _sock_list
# _cmd_thread.join()
# _socket_thread.join()
# _zmq_poll.close()
# for sock, handler in _sock_list.items():
# sock.close()
# if handler.is_server:
# _send_cmd(
# {
# "cmd":CMD_SERVER_QUIT,
# "server_id":handler._my_id,
# "proj":_project,
# }
# )
# _sock_list = None
def ssrpc_method(rpcType=0):
def _wrap_func(func):
import sys
classLocals = sys._getframe(1).f_locals
dictName = "__rpc_methods__"
if dictName not in classLocals:
classLocals[dictName] = {"__ssrpc_callback__": lambda x: x}
name = func.__name__
assert name not in classLocals[dictName]
classLocals[dictName][name] = rpcType
return func
return _wrap_func
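# Usage sketch (mirrors how SSRPCCliHandler.callback is decorated further below;
# MyHandler/ping are hypothetical names):
#     class MyHandler(SSRPCCliHandler):
#         @ssrpc_method()
#         def ping(self):
#             return "pong"
# The decorator records the method name in the class-level __rpc_methods__ table so
# that _RPCProxy can dispatch incoming packets to it.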
def _ssrpc_class_inherit(clz):
if not hasattr(clz, "__rpc_methods__") or "__inherited" in clz.__rpc_methods__:
return
rpc_dict = {"__inherited":True}
for x in reversed(clz.mro()):
if hasattr(x, "__rpc_methods__"):
rpc_dict.update(x.__rpc_methods__)
clz.__rpc_methods__ = rpc_dict
class _RPCb(object):
'''
    The return object of an SSRPC call; used to implement asynchronous/synchronous (callback/wait) invocation.
'''
def __init__(self, prx):
super(_RPCb, self).__init__()
self.proxy = prx
self.curr_sess_id = None
def callback(self, func):
assert self.curr_sess_id
self.proxy.cb_list[self.curr_sess_id] = func
self.curr_sess_id = None
def wait(self, timeout=0):
assert _mode & MAIN_THREAD_TICK, "wait can only be used in MAIN_THREAD_TICK mode"
# TODO 加入Timeout
old_time = (time.time() + timeout) if timeout > 0 else 0
cache_cmd = []
got_data = None
while True:
if _callback_queue.qsize() > 0:
cb = _callback_queue.get()
data = self.proxy.handler._unpack_data(cb[1])
if data.get("__cb_session_id") == self.curr_sess_id:
got_data = data
break
else:
cache_cmd.append(cb)
if old_time > 0 and time.time() > old_time:
break
time.sleep(0)
        for cmd in cache_cmd:  # push the cached messages back into the queue
_callback_queue.put(cmd)
return got_data["__args"] if got_data else None
class _RPCProxy(object):
'''
    The concrete RPC proxy object: wraps RPC calls into protocol packets for sending and unwraps received packets.
'''
SUNSHINE_UUID = None
def __init__(self, handler, is_server=False, reg_all=False):
super(_RPCProxy, self).__init__()
        print(handler, is_server, reg_all)
self.handler = handler
self.is_server = is_server
if reg_all:
self.reg_all()
if self.is_server:
_ssrpc_class_inherit(self.__class__)
self.cb_sess_id = None
self.cb_sess_id_dict_for_server = {}
self.cb_list = {}
self.cb_obj = _RPCb(self)
def __getitem__(self, client_id):
self.client_id = client_id
return self
def __getattr__(self, item):
def _call(*args, **kwargs):
kwargs["__args"] = args
kwargs["__rpc_func"] = item
kwargs["__session_id"] = uuid.uuid4().hex
kwargs["__handler_id"] = self.SUNSHINE_UUID
if self.is_server:
self.handler.send(self.client_id, kwargs)
delattr(self, "client_id")
else:
self.handler.send(kwargs)
self.cb_obj.curr_sess_id = kwargs["__session_id"]
return self.cb_obj
return _call
def try_parse_kwargs(self, data):
if isinstance(data, dict) and "__rpc_func" in data:
func_name = data.pop("__rpc_func")
if func_name in getattr(self.__class__, "__rpc_methods__", {}):
func = getattr(self, func_name, None)
args = data.pop("__args", [])
return func, args, data, data.pop("__session_id", None), data.pop("__cb_session_id", None)
else:
raise RuntimeError("rpc function %s not found" % (func_name))
return None, None, data, None, None
def _exec_func(self, data, cid=None):
# print("_exec_func", self, data)
try:
func, args, data, sid, cb_sid = self.try_parse_kwargs(data)
if cb_sid: # this is a callback message
func = self.cb_list.pop(cb_sid, None)
if func:
func(*args, **data)
elif func:
# print("func:{}, args:{}, cid:{}, data:{}".format(func, args, cid, data))
rtn = func(cid, *args, **data) if cid else func(*args, **data)
if rtn is not None: # callback message generate
kwargs = {}
kwargs["__rpc_func"] = "__ssrpc_callback__"
kwargs["__args"] = [rtn] if not isinstance(rtn, tuple) else rtn
kwargs["__cb_session_id"] = sid
kwargs["__handler_id"] = self.SUNSHINE_UUID
if self.is_server:
self.handler.send(cid, kwargs)
else:
self.handler.send(kwargs)
else:
self.handler.on_data(cid, data) if cid else self.handler.on_data(data)
except:
import traceback
traceback.print_exc()
def reg_all(self):
if not hasattr(self.__class__, "__rpc_methods__"):
setattr(self.__class__, "__rpc_methods__", {})
for k, func in self.Register(None).items():
registry = self.__class__.__rpc_methods__
registry[func.__name__] = func
# print("reg", func)
def Register(self, callServer):
return {}
class ZMQHandler(object):
_zmq_socket = None
    _my_id = None  # note: this is not the client_id passed to server.on_data
_to_server_id = None
def __init__(self):
super(ZMQHandler, self).__init__()
self._SSRPC = None
self._rpc_registry = {}
@property
def is_server(self):
return self._to_server_id is None
@property
def is_connected(self):
return self._zmq_socket is not None
def set_zmq_socket(self, so):
self._zmq_socket = so
self._zmq_cache_msgs = []
if self.is_server:
self.send(b"imsvr", self._my_id)
def stop(self):
self._zmq_socket = None
    _ZIP_MAGIC_STR = b"\xffsrz"  # magic prefix marking a zlib-compressed payload
    _ZIP_MIN_SIZE = 1024  # minimum packed size that triggers automatic compression
def _unpack_data(self, data):
if len(data) > 4 and data[:4] == self._ZIP_MAGIC_STR:
import zlib
data = zlib.decompress(data[4:])
data = _unpack(data)
if not is_py2:
data = _convert_bytes_to_str(data)
return data
def _pack_data(self, data):
data = _pack(data)
if len(data) > self._ZIP_MIN_SIZE:
import zlib
data = self._ZIP_MAGIC_STR + zlib.compress(data)
return data
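    # Round-trip sketch (added): small payloads stay as plain msgpack, while anything
    # larger than _ZIP_MIN_SIZE gains the _ZIP_MAGIC_STR prefix plus zlib compression;
    # _unpack_data() reverses both cases transparently, e.g.
    #     assert self._unpack_data(self._pack_data({"k": "v"})) == {"k": "v"}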
# @property
# def SSRPC(self):
# if not self._SSRPC:
# self._SSRPC = _RPCProxy(self, self.is_server)
# return self._SSRPC
def register_rpc_handler(self, rpc_hanlder):
if rpc_hanlder.SUNSHINE_UUID in self._rpc_registry:
raise RuntimeError("duplicated SUNSHINE_UUID %s" % rpc_hanlder.SUNSHINE_UUID)
self._rpc_registry[rpc_hanlder.SUNSHINE_UUID] = rpc_hanlder
def _exec_func(self, data, cid=None):
data = self._unpack_data(data)
rpc_hanlder_id = data.pop("__handler_id")
try:
rpc_hanlder = self._rpc_registry[rpc_hanlder_id]
except KeyError:
print("SUNSHINE_UUID %s not found" % rpc_hanlder_id)
return
if _mode & MAIN_THREAD_TICK and threading.current_thread() is _socket_thread:
_callback_queue.put([rpc_hanlder, data, cid])
else:
rpc_hanlder._exec_func(data, cid)
# self.SSRPC._exec_func(data, cid)
def __getitem__(self, handlerid):
return self._rpc_registry[handlerid]
class SSRPCSvrHandler(ZMQHandler):
def __init__(self):
super(SSRPCSvrHandler, self).__init__()
self.online_clients = {}
self._clientInfoCBFunc = None
def on_init_finish(self, rtn):
pass
def on_data(self, client_id, data):
raise NotImplementedError()
def on_client_connect(self, client_id):
print("client", _hex_str(client_id), "connect")
pass
def on_client_disconnect(self, client_id):
print("client", _hex_str(client_id), "disconnect")
pass
def _on_multi_data(self, msgs):
self._zmq_cache_msgs.extend(msgs)
while len(self._zmq_cache_msgs) >= 2:
cid = self._zmq_cache_msgs.pop(0)
data = self._zmq_cache_msgs.pop(0)
if cid == b"myclients_cb": # 特殊信息
self.all_clients_info(_unpack(data))
continue
if cid not in self.online_clients:
self.on_client_connect(cid)
self.online_clients[cid] = time.time()
if data != _HEARTBEAT_DATA:
self._exec_func(data, cid)
def send(self, client_id, data):
self._zmq_socket.send_multipart([client_id, self._pack_data(data)])
def return_sync_result(self, client_id, data, session_id):
send_data = {"data": data, "__session_id": session_id}
self._zmq_socket.send_multipart([client_id, self._pack_data(send_data)])
def send_heartbeat(self):
self._zmq_socket.send_multipart([_HEARTBEAT_DATA, _pack(self._my_id)])
for cli in list(self.online_clients):
if time.time() - self.online_clients[cli] > _HEARTBEAT_SEC * 2:
self.online_clients.pop(cli)
self.on_client_disconnect(cli)
def get_all_clients(self, cbFunc=None):
"""获取连接该节点的客户端信息,中心节点上的clients"""
self.send(b"myclients", "")
self._clientInfoCBFunc = cbFunc
def all_clients_info(self, data):
"""从连接节点返回的客户端信息"""
if self._clientInfoCBFunc:
self._clientInfoCBFunc(data)
else:
print("all connected clients:")
for key, val in data.items():
print("client: %s, info: %s" % (_hex_str(key), val)) # debug用
class SSRPCCliHandler(ZMQHandler):
def __init__(self):
        self.__callbacks = {}  # callback cache
super(SSRPCCliHandler, self).__init__()
def _registerCallback(self, callback):
""" 注册回调 """
callbackId = str(uuid.uuid4()).encode("UTF-8") # python 2 与 3 的 str 不同,所以传bytes
self.__callbacks[callbackId] = callback
return callbackId
@ssrpc_method()
def callback(self, callback_id, *args, **kwargs):
"""Server回调过来,根据 callback_id 找回 callback 函数"""
callback_id = callback_id.encode("UTF-8")
self.__callbacks[callback_id](*args, **kwargs)
del self.__callbacks[callback_id]
def on_init_finish(self, rtn):
print("on_init_finish", self, rtn)
def on_data(self, data):
raise NotImplementedError()
def _on_raw_data(self, data):
self._exec_func(data)
def send(self, data):
if hasattr(self, "_zmq_socket") and self._zmq_socket is not None:
self._zmq_socket.send(self._pack_data(data))
def send_heartbeat(self):
self._zmq_socket.send(_HEARTBEAT_DATA)
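# Added usage sketch (not part of the original module): a round trip through the
# transparent zlib framing implemented by _pack_data/_unpack_data above.  Assumes the
# module-level _pack/_unpack serializers defined earlier in this file are available.
#
#   handler = SSRPCCliHandler()
#   wire_bytes = handler._pack_data({"msg": "x" * 4096})   # large payload -> compressed, magic-prefixed
#   assert handler._unpack_data(wire_bytes) == {"msg": "x" * 4096}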
|
StarcoderdataPython
|
289805
|
"""
Demonstrates swapping the values of two variables
"""
number1 = 65 #Declares a variable named number1 and assigns it the value 65
number2 = 27 #Declares a variable named number2 and assigns it the value 27
temp_number = number1 #Copies the reference of number1 to temp_number
number1 = number2 #Copies the reference of number2 to number1
number2 = temp_number #Copies the reference of temp_number to number2
print(number1) #Prints the value referenced by number1
print(number2) #Prints the value referenced by number2
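#For reference (added note): Python can swap two names in a single statement using
#tuple unpacking, with no temporary variable. The line below swaps them once more.
number1, number2 = number2, number1 #number1 and number2 return to their original values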
|
StarcoderdataPython
|
3429865
|
import string
# the class-based expressions are mostly for organization
# but sometimes they're just too clunky
LEFT_BRACKET = '['
RIGHT_BRACKET = ']'
class FormalDefinition(object):
"""
The basic operators and elements of a regular expression
"""
empty_string = '^$'
alternative = '|'
OR = alternative
kleene_star = "*"
class Group(object):
"""
The Group helps with regular expression groups
"""
__slots__ = ()
@staticmethod
def group(expression):
"""
Create a grouped expression
:param:
- `expression`: the regular expression to group
:return: uncompiled group expression for e
"""
return "({e})".format(e=expression)
@staticmethod
def named(name, expression):
"""
Creates a named group
:param:
- `name`: name to give the group
- `expression`: expression to use in the group
"""
return "(?P<{n}>{e})".format(n=name,
e=expression)
@staticmethod
def followed_by(suffix):
"""
Creates the expression to match if followed by the suffix
"""
return "(?={0})".format(suffix)
@staticmethod
def not_followed_by(suffix):
"""
Creates a (perl) negative lookahead expression
e.g. 'alpha(?!beta)' matches 'alpha' and 'alphagamma', not 'alphabeta'
:param:
- `suffix`: suffix to avoid matching
"""
return "(?!{s})".format(s=suffix)
@staticmethod
def preceded_by(prefix):
"""
Creates a look-behind expression
:param:
- `prefix`: an expression of fixed-order (no quantifiers or alternatives of different length)
"""
return "(?<={0})".format(prefix)
@staticmethod
def not_preceded_by(prefix):
"""
Creates a (perl) negative look behind expression
:param:
- `prefix`: expression to group
"""
return "(?<!{e})".format(e=prefix)
class Quantifier(object):
"""
A class to hold cardinality helpers
"""
__slots__ = ()
@staticmethod
def one_or_more(pattern):
"""
Adds the one-or-more quantifier to the end of the pattern.
"""
return '{0}+'.format(pattern)
@staticmethod
def zero_or_one(pattern):
"""
Adds the zero-or-one quantifier to the pattern
"""
return '{0}?'.format(pattern)
@staticmethod
def exactly(repetitions):
"""
Creates suffix to match source repeated exactly n times
:param:
- `repetitions`: number of times pattern has to repeat to match
"""
return "{{{0}}}".format(repetitions)
@staticmethod
def zero_or_more(pattern):
"""
Adds the kleene star to the pattern
:return: pattern*
"""
return "{0}*".format(pattern)
@staticmethod
def m_to_n(m, n):
"""
Creates a repetition ranges suffix {m,n}
:param:
- `m`: the minimum required number of matches
- `n`: the maximum number of matches
"""
return "{{{m},{n}}}".format(m=m, n=n)
class CharacterClass(object):
"""
A class to help with character classes
"""
__slots__ = ()
alpha_num = r"\w"
alpha_nums = Quantifier.one_or_more(alpha_num)
digit = r'\d'
non_digit = r'\D'
non_zero_digit = r"[1-9]"
@staticmethod
def character_class(characters):
"""
Creates a character class from the expression
:param:
- `characters`: string to convert to a class
:return: expression to match any character in expression
"""
return "[{e}]".format(e=characters)
@staticmethod
def not_in(characters):
"""
Creates a complement character class
:param:
- `characters`: characters to not match
:return: expression to match any character not in expression
"""
return "[^{e}]".format(e=characters)
class Boundaries(object):
"""
A class to hold boundaries for expressions
"""
__slots__ = ()
string_start = "^"
string_end = "$"
@staticmethod
def word(word):
"""
Adds word boundaries to the word
:param:
- `word`: string to add word boundaries to
:return: string (raw) with word boundaries on both ends
"""
return r"\b{e}\b".format(e=word)
@staticmethod
def string(string):
"""
Adds boundaries to only match an entire string
:param:
- `string`: string to add boundaries to
:return: expression that only matches an entire line of text
"""
return r"^{e}$".format(e=string)
class CommonPatterns(object):
"""
The common patterns that were leftover
"""
__slots__ = ()
#anything and everything
anything = r"."
everything = Quantifier.zero_or_more(anything)
letter = CharacterClass.character_class(characters=string.ascii_letters)
letters = Quantifier.one_or_more(letter)
optional_letters = Quantifier.zero_or_more(letter)
space = r'\s'
spaces = Quantifier.one_or_more(space)
optional_spaces = Quantifier.zero_or_more(space)
not_space = r'\S'
not_spaces = Quantifier.one_or_more(not_space)
class Numbers(object):
"""
A class to hold number-related expressions
"""
__slots__ = ()
decimal_point = r'\.'
single_digit = Boundaries.word(CharacterClass.digit)
digits = Quantifier.one_or_more(CharacterClass.digit)
two_digits = Boundaries.word(CharacterClass.non_zero_digit + CharacterClass.digit)
one_hundreds = Boundaries.word("1" + CharacterClass.digit * 2)
optional_digits = Quantifier.zero_or_more(CharacterClass.digit)
    # Look-behind assertions must be fixed-width, and string-start and a whitespace
    # character have different widths, so avoiding '\b' (a decimal point '.' counts as a
    # word boundary) while accepting line start/end or surrounding whitespace takes four alternatives.
START_PREFIX = Group.preceded_by(Boundaries.string_start)
END_SUFFIX = Group.followed_by(Boundaries.string_end)
SPACE_PREFIX = Group.preceded_by(CommonPatterns.space)
SPACE_SUFFIX = Group.followed_by(CommonPatterns.space)
# Zero
ZERO = '0'
zero = (START_PREFIX + ZERO + END_SUFFIX +FormalDefinition.OR +
START_PREFIX + ZERO + SPACE_SUFFIX +FormalDefinition.OR +
SPACE_PREFIX + ZERO + END_SUFFIX +FormalDefinition.OR +
SPACE_PREFIX + ZERO + SPACE_SUFFIX)
# positive integer
z_plus = CharacterClass.non_zero_digit + optional_digits
positive_integer = (START_PREFIX + z_plus + END_SUFFIX +FormalDefinition.OR +
START_PREFIX + z_plus + SPACE_SUFFIX +FormalDefinition.OR +
SPACE_PREFIX + z_plus + END_SUFFIX +FormalDefinition.OR +
SPACE_PREFIX + z_plus + SPACE_SUFFIX )
nonnegative_integer = (Group.not_preceded_by(decimal_point +FormalDefinition.OR + '0') +
CharacterClass.non_zero_digit + optional_digits +
r'\b' +FormalDefinition.OR +
Boundaries.word('0'))
integer = (Group.not_preceded_by(decimal_point +FormalDefinition.OR + '0') +
Quantifier.zero_or_one('-') +
CharacterClass.non_zero_digit + optional_digits +
r'\b' +FormalDefinition.OR +
Boundaries.word('0'))
real = (Group.not_preceded_by(decimal_point +FormalDefinition.OR + '0') +
Quantifier.zero_or_one('-') +
CharacterClass.digit + optional_digits +
'.' + optional_digits +
r'\b' ) +FormalDefinition.OR + integer
HEX = CharacterClass.character_class(string.hexdigits)
hexadecimal = Quantifier.one_or_more(HEX)
class Networking(object):
"""
Holds expressions to help with networking-related text.
"""
__slots__ = ()
octet = Group.group(expression=FormalDefinition.OR.join([Numbers.single_digit,
Numbers.two_digits,
Numbers.one_hundreds,
Boundaries.word("2[0-4][0-9]"),
Boundaries.word("25[0-5]")]))
dot = Numbers.decimal_point
ip_address = dot.join([octet] * 4)
hex_pair = Numbers.HEX + Quantifier.exactly(2)
mac_address = ":".join([hex_pair] * 6)
# python standard library
import unittest
import random
import re
#third-party
try:
import numpy.random as nrandom
except ImportError:
pass
# this package
from randomizer import Randomizer
L_GROUP = '('
R_GROUP = ')'
L_PERL_GROUP = L_GROUP + "?"
class TestOatBranGroup(unittest.TestCase):
def test_group(self):
"""
Does the group method add parentheses?
"""
sample = Randomizer.letters()
exp = Group.group(sample)
self.assertEqual("(" + sample + ")",exp)
matched = re.search(exp,sample+Randomizer.letters()).groups()[0]
self.assertEqual(matched, sample)
return
def test_named(self):
"""
Does the named method create a named group?
"""
name = Randomizer.letters()
sample = Randomizer.letters()
text = Randomizer.letters() + sample + Randomizer.letters()
exp = Group.named(name=name, expression=sample)
expected = '(?P<' + name + '>' + sample + ")"
self.assertEqual(expected, exp)
matched = re.search(exp, text).groupdict()[name]
self.assertEqual(sample, matched)
return
def test_followed_by(self):
"""
Does it match strings followed by a pattern?
"""
body = Randomizer.letters()
sub_string = Randomizer.letters()
suffix = Randomizer.letters()
text = body + sub_string + suffix
name = 'followed'
expression = Group.named(name,
sub_string + Group.followed_by(suffix))
match = re.search(expression, text)
self.assertEqual(match.group(name), sub_string)
def test_not_followed_by(self):
"""
Does not_followed_by create a negative lookahead assertion?
"""
prefix = Randomizer.letters(maximum=5)
suffix = Randomizer.letters_complement(prefix)
expr = Group.not_followed_by(suffix)
text = Randomizer.letters()
self.assertEqual(L_PERL_GROUP + '!' + suffix + R_GROUP,
expr)
self.assertIsNone(re.search(text + expr, text + suffix))
self.assertIsNotNone(re.search(text + expr, text))
return
def test_preceded_by(self):
"Does it match a substring with a prefix?"
name = 'preceded'
body = Randomizer.letters()
sub_string = Randomizer.letters()
prefix = Randomizer.letters()
expression = Group.named(name,
Group.preceded_by(prefix) + sub_string)
text = body + prefix + sub_string
match = re.search(expression, text)
self.assertEqual(match.group(name),
sub_string)
def test_not_preceded_by(self):
'''
Does it create a negative look-behind expression?
'''
prefix = Randomizer.letters()
expr = Group.not_preceded_by(prefix)
self.assertEqual(L_PERL_GROUP + "<!" + prefix + R_GROUP,
expr)
text = Randomizer.letters(minimum=5)
is_preceded_by = prefix + text
self.assertIsNone(re.search(expr + text, is_preceded_by))
self.assertIsNotNone(re.search(expr + text, text))
return
class TestOatBranClass(unittest.TestCase):
def test_class(self):
'''
Does it convert the string to a character class?
'''
sample = Randomizer.letters()
expression = CharacterClass.character_class(sample)
self.assertEqual(LEFT_BRACKET + sample + RIGHT_BRACKET, expression)
sub_string = random.choice(sample)
complement = Randomizer.letters_complement(sample)
self.assertIsNotNone(re.search(expression, sub_string))
self.assertIsNone(re.search(expression, complement))
return
def test_not(self):
'''
Does it convert the string to a non-matching class?
'''
sample = Randomizer.letters(maximum=10)
complement = Randomizer.letters_complement(sample)
expression = CharacterClass.not_in(sample)
self.assertEqual(LEFT_BRACKET + '^' + sample + RIGHT_BRACKET,
expression)
self.assertIsNone(re.search(expression, sample))
self.assertIsNotNone(re.search(expression, complement))
return
def test_alpha_num(self):
"""
Does it return alpha-num character class (plus underscore)?
"""
expression = CharacterClass.alpha_num
        character = random.choice(string.ascii_letters + string.digits + '_')
non_alpha = random.choice(string.punctuation.replace('_', ''))
self.assertIsNotNone(re.search(expression, character))
self.assertIsNone(re.search(expression, non_alpha))
return
    def test_alpha_nums(self):
        """
        Does it return the expression to match one or more alpha-nums?
        """
        expression = CharacterClass.alpha_nums
        self.assertIsNotNone(re.search(expression, Randomizer.letters()))
        self.assertIsNone(re.search(expression, '!!!'))
        return
class TestQuantifier(unittest.TestCase):
def test_one_or_more(self):
"""
Does it return the one-or-more metacharachter?
"""
        character = random.choice(string.ascii_letters)
complement = Randomizer.letters_complement(character)
text = Randomizer.letters() + character * random.randint(1,100) + Randomizer.letters()
expression = character + '+'
self.assertIsNone(re.search(expression, complement))
self.assertIsNotNone(re.search(expression, text))
return
def test_zero_or_more(self):
"""
Does it return the kleene star?
"""
substring = Randomizer.letters()
text = Randomizer.letters()
complement = text + Randomizer.letters_complement(substring)
expression = text + Quantifier.zero_or_more('(' + substring + ')')
text_1 = text + substring * random.randint(0, 10) + Randomizer.letters()
self.assertIsNotNone(re.search(expression, complement))
self.assertIsNotNone(re.search(expression, text_1))
return
def test_zero_or_one(self):
"""
Does it return the zero-or-one quantifier?
"""
substring = Randomizer.letters()
text = Randomizer.letters()
expression = text + Quantifier.zero_or_one("(" + substring + ")")
text_1 = text + substring * random.randint(1,100)
text_2 = text + substring * random.randint(1,100)
self.assertIsNotNone(re.search(expression, text_1))
self.assertEqual(re.search(expression, text_2).groups()[0], substring)
return
def test_exactly(self):
"""
Does it return the repetition suffix?
"""
repetitions = Randomizer.integer(minimum=1, maximum=5)
repeater = Randomizer.letters()
expected = "{" + "{0}".format(repetitions) + "}"
quantifier = Quantifier.exactly(repetitions)
self.assertEqual(expected, quantifier)
expression = "(" + repeater + ")" + quantifier
text = Randomizer.letters() + repeater * (repetitions + Randomizer.integer(minimum=0))
self.assertIsNotNone(re.search(expression, text))
self.assertEqual(re.search(expression, text).groups(), (repeater,))
return
def test_m_to_n(self):
"""
Does it return the expression to match m-to-n repetitions
"""
m = Randomizer.integer(minimum=5)
n = Randomizer.integer(minimum=m+1)
substring = Randomizer.letters()
quantifier = Quantifier.m_to_n(m,n)
expression = '(' + substring + ')' + quantifier
self.assertEqual("{" + str(m) + ',' + str(n) + '}',quantifier)
text = Randomizer.letters() + substring * Randomizer.integer(m, n)
complement = (Randomizer.letters_complement(substring) +
substring * Randomizer.integer(0,m-1))
too_many = substring * Randomizer.integer(n+1, n*2)
self.assertIsNotNone(re.search(expression, text))
self.assertIsNone(re.search(expression, complement))
self.assertEqual(re.search(expression, too_many).groups(), (substring,))
return
class TestBoundaries(unittest.TestCase):
def test_word_boundary(self):
"""
Does it add word-boundaries to the expression
"""
word = Randomizer.letters()
expected = r'\b' + word + r'\b'
expression = Boundaries.word(word)
bad_word = word + Randomizer.letters()
text = ' '.join([Randomizer.letters(),word,Randomizer.letters()])
self.assertIsNone(re.search(expression, bad_word))
self.assertIsNotNone(re.search(expression, text))
return
def test_string_boundary(self):
"""
Does it add boundaries to match a whole line?
"""
substring = Randomizer.letters()
expression = Boundaries.string(substring)
expected = "^" + substring + "$"
self.assertEqual(expected, expression)
self.assertIsNotNone(re.search(expression, substring))
self.assertIsNone(re.search(expression, ' ' + substring))
return
def test_string_start(self):
"""
Does it have return a string start metacharacter?
"""
metacharacter = Boundaries.string_start
expected = '^'
self.assertEqual(expected, metacharacter)
word = Randomizer.letters()
expression = Boundaries.string_start + word
text = word + Randomizer.letters()
self.assertIsNotNone(re.search(expression, text))
self.assertIsNone(re.search(expression, " " + text))
return
def test_string_end(self):
"""
Does it return the end of string metacharacter?
"""
metacharacter = Boundaries.string_end
word = Randomizer.letters()
expression = word + metacharacter
text = Randomizer.letters() + word
self.assertIsNotNone(re.search(expression, text))
self.assertIsNone(re.search(expression, text + Randomizer.letters()))
return
class TestNumbers(unittest.TestCase):
def test_decimal_point(self):
"""
Does it return a decimal point literal?
"""
metacharacter = Numbers.decimal_point
test = random.uniform(0,100)
self.assertIsNotNone(re.search(metacharacter, str(test)))
self.assertIsNone(re.search(metacharacter, Randomizer.letters()))
return
def test_digit(self):
"""
Does it return the digit character class?
"""
metacharacter = CharacterClass.digit
test = Randomizer.integer(maximum=9)
self.assertIsNotNone(re.search(metacharacter, str(test)))
self.assertIsNone(re.search(metacharacter, Randomizer.letters()))
return
def test_non_digit(self):
"""
Does it return the anything-but-a-digit metacharacter?
"""
metacharacter = CharacterClass.non_digit
test = str(Randomizer.integer(maximum=9))
self.assertIsNone(re.search(metacharacter, test))
return
def test_non_zero(self):
"""
Does it return an expression to match 1-9 only?
"""
expression = CharacterClass.non_zero_digit
test = str(random.choice(range(1,10)))
self.assertIsNotNone(re.search(expression, test))
self.assertIsNone(re.search(expression, '0'))
return
def test_single_digit(self):
"""
Does it return an expression to match only one digit?
"""
expression = Numbers.single_digit
test = str(Randomizer.integer(maximum=9))
two_many = str(Randomizer.integer(minimum=10))
self.assertIsNotNone(re.search(expression, test))
self.assertIsNone(re.search(expression, two_many))
return
def test_two_digits(self):
"""
Does it return an expression to match exactly two digits?
"""
expression = Numbers.two_digits
test = str(Randomizer.integer(minimum=10,maximum=99))
fail = random.choice([str(Randomizer.integer(0,9)), str(Randomizer.integer(100,1000))])
self.assertIsNotNone(re.search(expression, test))
self.assertIsNone(re.search(expression, fail))
return
def test_one_hundreds(self):
"""
Does it match values from 100-199?
"""
number = "{0}".format(random.randint(100,199))
low_number = str(random.randint(-99,99))
high_number = str(random.randint(200,500))
float_number = str(random.uniform(100,199))
text = Randomizer.letters() + str(random.randint(100,199))
name = 'onehundred'
expression = re.compile(Group.named(name,
Numbers.one_hundreds))
self.assertIsNotNone(re.search(Numbers.one_hundreds, number))
self.assertIsNone(re.search(Numbers.one_hundreds, low_number))
self.assertIsNone(re.search(Numbers.one_hundreds, high_number))
# it only checks word boundaries and the decimal point is a boundary
self.assertIsNotNone(re.search(Numbers.one_hundreds, float_number))
# it needs a word boundary so letters smashed against it will fail
self.assertIsNone(re.search(Numbers.one_hundreds, text))
return
def test_digits(self):
"Does it match one or more digits?"
expression = Group.named(name='digits', expression=Numbers.digits)
first = "{0}".format(random.randint(0,9))
rest = str(random.randint(0,1000))
test = first + rest
self.assertIsNotNone(re.search(expression, test))
match = re.search(expression, test)
self.assertEqual(match.group('digits'), test)
mangled = Randomizer.letters() + test + Randomizer.letters()
match = re.search(expression, mangled)
self.assertEqual(match.group('digits'), test)
return
def test_zero(self):
"Does it match zero by itself?"
name = 'zero'
expression = Group.named(name,
Numbers.zero)
prefix = random.choice(['', ' '])
suffix = random.choice(['', ' '])
zero = '0'
text = prefix + zero + suffix
match = re.search(expression, text)
self.assertEqual(match.group(name), zero)
self.assertIsNone(re.search(expression, str(random.randint(1,100))))
return
def test_positive_integers(self):
'Does it only match 1,2,3,...?'
name = 'positiveintegers'
expression = Group.named(name,
Numbers.positive_integer)
regex = re.compile(expression)
# zero should fail
self.assertIsNone(regex.search('0' ))
# positive integer (without sign) should match
first_digit = str(nrandom.randint(1,9))
positive_integer = first_digit + ''.join(str(i) for i in nrandom.random_integers(1,9,
size=nrandom.randint(100)))
match = regex.search(positive_integer)
self.assertEqual(match.group(name), positive_integer)
# negative integer should fail
negation = '-' + positive_integer
self.assertIsNone(regex.search(negation))
# surrounding white space should be trimmed off
spaces = " " * nrandom.randint(100) + positive_integer + ' ' * nrandom.randint(100)
match = regex.search(spaces)
self.assertEqual(match.group(name), positive_integer)
# leading zero should fail
leading_zeros = '0' * nrandom.randint(1,100) + positive_integer
self.assertIsNone(regex.search(leading_zeros))
return
def test_integers(self):
"""
Does it match positive and negative integers?
"""
name = 'integer'
expression = Group.named(name, Numbers.integer)
regex = re.compile(expression)
# 0 alone should match
zero = '0'
match = regex.search(zero)
self.assertEqual(match.group(name), zero)
# positive_integers should match
first_digit = str(nrandom.randint(1,9))
positive = first_digit +''.join(str(i) for i in nrandom.random_integers(0,9, nrandom.randint(1, 100)))
match = regex.search(positive)
self.assertEqual(match.group(name), positive)
# negatives should match
negative = '-' + positive
match = regex.search(negative)
self.assertEqual(match.group(name), negative)
# white space boundaries should work too
number = nrandom.choice(('','-')) + positive
text = " " * nrandom.randint(10) + number + ' ' * nrandom.randint(10)
match = regex.search(text)
self.assertEqual(match.group(name), number)
# punctuation should work (like for csvs)
text = number + ','
match = regex.search(text)
self.assertEqual(match.group(name), number)
# match prefix to decimal points
# this is not really what I wanted but otherwise it's hard to use in text
text = number + '.' + str(nrandom.randint(100))
match = regex.search(text)
self.assertEqual(match.group(name), number)
return
def test_nonnegative_integer(self):
"""
Does it match positive integers and 0?
"""
name = 'nonnegative'
expression = Group.named(name,
Numbers.nonnegative_integer)
regex = re.compile(expression)
number = str(nrandom.randint(1,9)) + str(nrandom.randint(1000))
match = regex.search(number)
self.assertEqual(number, match.group(name))
# should match 0
zero = '0'
match = regex.search(zero)
self.assertEqual(match.group(name), zero)
# should not match negative
# but, to allow it in text, it will grab the integer to the right
# in other words, it assumes the `-` is part of a sentence but not part of the number
negation = '-' + number
match = regex.search(negation)
self.assertEqual(match.group(name), number)
return
def assert_match(self, regex, text, name, expected):
match = regex.search(text)
self.assertEqual(match.group(name), expected)
return
def test_real(self):
"""
Does it match floating-point numbers?
"""
name = 'real'
expression = Group.named(name,
Numbers.real)
regex = re.compile(expression)
# does it match 0?
zero = '0'
self.assert_match(regex, zero, name, zero)
# does it match a leading 0?
number = '0.' + str(nrandom.randint(100))
self.assert_match(regex, number, name, number)
# does it match a whole decimal
number = str(nrandom.randint(1,100)) + '.' + str(nrandom.randint(100))
self.assert_match(regex, number, name, number)
# what about positive and negative?
number = (random.choice(('', '-')) + str(nrandom.randint(100)) +
random.choice(('', '.')) + str(nrandom.randint(100)))
text = ' ' * nrandom.randint(5) + number + ' ' * nrandom.randint(5)
self.assert_match(regex, text, name, number)
# what happens if it comes at the end of a sentence?
number = (random.choice(('', '-')) + str(nrandom.randint(100)) +
random.choice(('', '.')) + str(nrandom.randint(100)))
text = number + '.'
self.assert_match(regex, text, name, number)
return
def test_hexadecimal(self):
"""
Does it match hexadecimal numbers?
"""
name = 'hexadecimal'
        number = ''.join((random.choice(string.hexdigits) for char in range(random.randint(1, 100))))
non_hex = 'IJKLMNOPQRSTUVWXYZ'
text = random.choice(non_hex) + number + non_hex
expression = re.compile(Group.named(name,
Numbers.hexadecimal))
match = expression.search(text)
self.assertEqual(match.group(name), number)
return
class TestFormalDefinition(unittest.TestCase):
def test_empty_string(self):
"Does it match only an empty string?"
name = 'empty'
expression = Group.named(name,
FormalDefinition.empty_string)
empty = ''
not_empty = Randomizer.letters()
match = re.search(expression, empty)
self.assertEqual(empty, match.group(name))
self.assertIsNone(re.search(expression, not_empty))
return
def test_alternation(self):
"""
Does it match alternatives?
"""
name = 'or'
# this might fail if one of the terms is a sub-string of another
# and the longer term is chosen as the search term
terms = [Randomizer.letters() for term in range(random.randint(10, 100))]
expression = Group.named(name,
FormalDefinition.alternative.join(terms))
test = terms[random.randrange(len(terms))]
match = re.search(expression, test)
self.assertEqual(test, match.group(name))
return
def test_kleene_star(self):
"""
Does it match zero or more of something?
"""
name = 'kleene'
        term = random.choice(string.ascii_letters)
expression = Group.named(name,
term + FormalDefinition.kleene_star)
test = term * random.randint(0, 100)
match = re.search(expression, test)
self.assertEqual(test, match.group(name))
return
class TestNetworking(unittest.TestCase):
def test_octet(self):
"""
Does it match a valid octet?
"""
name = 'octet'
expression = re.compile(Group.named(name,
Networking.octet))
for t1 in '198 10 1 255'.split():
match = expression.search(t1)
self.assertEqual(t1, match.group(name))
bad_octet = random.randint(256, 999)
self.assertIsNone(expression.search(str(bad_octet)))
return
def test_ip_address(self):
"""
Does it match a valid ip address?
"""
name = 'ipaddress'
expression = re.compile(Group.named(name,
Networking.ip_address))
for text in '192.168.0.1 10.10.10.2 192.168.3.11'.split():
match = expression.search(text)
self.assertEqual(match.group(name), text)
for bad_ip in "10.10.10 12.9.49.256 ape".split():
self.assertIsNone(expression.search(bad_ip))
return
def test_mac_address(self):
name = 'macaddress'
expression = re.compile(Group.named(name,
Networking.mac_address))
text = 'f8:d1:11:03:12:58'
self.assertEqual(expression.search(text).group(name),
text)
return
|
StarcoderdataPython
|
1717850
|
'''
(c) University of Liverpool 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=wrong-import-order
from sklearn.metrics import classification_report, confusion_matrix
from liv_learn.keras import classify_lstm
from liv_learn.utils.biochem_utils import fasta_to_df, get_ordinal_seq
import pandas as pd
def get_data():
'''Get data.'''
pos_df = fasta_to_df('data/thermostability/h.txt')
neg_df = fasta_to_df('data/thermostability/l.txt')
pos_df['thermostability'] = 1.0
neg_df['thermostability'] = 0.0
df = pd.concat([pos_df, neg_df])
df = df.sample(frac=1)
return df
def main():
'''main method.'''
df = get_data()
X = get_ordinal_seq(df['seq'])
y = df['thermostability']
scores, y_true, y_pred = classify_lstm(X, y)
print('\nAccuracy: %.2f%%' % (scores[1] * 100))
print('\nConfusion Matrix')
print(confusion_matrix(y_true, y_pred))
print('\nClassification Report')
print(classification_report(y_true, y_pred, target_names=['l', 'h']))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3369987
|
from simbatch.core import core as batch
from simbatch.core.actions import SingleAction
import pytest
@pytest.fixture(scope="module")
def sib():
# TODO pytest-datadir pytest-datafiles vs ( path.dirname( path.realpath(sys.argv[0]) )
sib = batch.SimBatch(5, ini_file="config_tests.ini")
return sib
def test_prepare_data_directory_by_delete_all_files(sib):
assert sib.sts.store_data_mode is not None
if sib.sts.store_data_mode == 1:
assert sib.comfun.path_exists(sib.sts.store_data_json_directory) is True
else:
# PRO version with sql
pass
# sib.sch.clear_all_schemas_data(clear_stored_data=True)
sib.sch.delete_json_schema_file()
def test_no_schema_data(sib):
assert len(sib.sts.store_data_json_directory) > 0
assert len(sib.sts.JSON_SCHEMAS_FILE_NAME) > 0
assert sib.comfun.file_exists(sib.sts.store_data_json_directory + sib.sts.JSON_SCHEMAS_FILE_NAME) is False
def test_create_example_schemas_data(sib):
assert sib.sch.create_example_schemas_data(do_save=True) == sib.sch.sample_data_checksum
assert sib.sch.sample_data_checksum is not None
assert sib.sch.sample_data_total is not None
assert sib.sch.total_schemas == sib.sch.sample_data_total
def test_exist_proj_data(sib):
assert sib.comfun.file_exists(sib.sts.store_data_json_directory + sib.sts.JSON_SCHEMAS_FILE_NAME) is True
def test_clear_all_schemas_data(sib):
assert sib.sch.clear_all_schemas_data() is True
assert sib.sch.total_schemas == 0
assert len(sib.sch.schemas_data) == 0
def test_json_schemas_data(sib):
assert sib.sts.store_data_mode is not None
if sib.sts.store_data_mode == 1:
json_file = sib.sts.store_data_json_directory + sib.sts.JSON_SCHEMAS_FILE_NAME
json_schemas = sib.comfun.load_json_file(json_file)
json_keys = json_schemas.keys()
assert ("schemas" in json_keys) is True
def test_get_none_index_from_id(sib):
assert sib.sch.get_index_by_id(2) is None
def test_load_schemas_from_json(sib):
json_file = sib.sts.store_data_json_directory + sib.sts.JSON_SCHEMAS_FILE_NAME
assert sib.comfun.file_exists(json_file) is True
assert sib.sch.load_schemas_from_json(json_file=json_file) is True
assert sib.sch.total_schemas == sib.sch.sample_data_total
def test_get2_index_from_id(sib):
assert sib.sch.get_index_by_id(2) == 1
def test_load_schemas(sib):
assert sib.sch.clear_all_schemas_data() is True
assert sib.sch.total_schemas == 0
assert sib.sch.load_schemas() is True
def test_get3_index_from_id(sib):
assert sib.sch.get_index_by_id(3) == 2
def test_total_schemas(sib):
assert sib.sch.total_schemas == sib.sch.sample_data_total
assert len(sib.sch.schemas_data) == sib.sch.sample_data_total
def test_update_current_from_id(sib):
assert sib.sch.current_schema_id is None
assert sib.sch.current_schema_index is None
assert sib.sch.update_current_from_id(2) == 1
# sib.prj.print_all()
assert sib.sch.current_schema_id == 2
assert sib.sch.current_schema_index == 1
assert sib.sch.current_schema.schema_name == "schema 2"
def test_update_current_from_index(sib):
sib.sch.current_schema_id = None
sib.sch.current_schema_index = None
assert sib.sch.update_current_from_index(2) == 3
assert sib.sch.current_schema_id == 3
assert sib.sch.current_schema_index == 2
assert sib.sch.current_schema.schema_name == "schema 3"
def test_current_schema_details(sib):
assert sib.sch.current_schema.id == 3
assert sib.sch.current_schema.schema_name == "schema 3"
assert sib.sch.current_schema.state_id == 22
assert sib.sch.current_schema.state == "ACTIVE"
assert sib.sch.current_schema.schema_version == 5
assert sib.sch.current_schema.project_id == 2
assert sib.sch.current_schema.based_on_definition == "virtual_definition"
assert len(sib.sch.current_schema.actions_array) > 0 # TODO precise
assert sib.sch.current_schema.description == "fire with smoke"
def test_remove_single_schema_by_id(sib):
assert sib.sch.remove_single_schema(id=1) is True
assert sib.sch.total_schemas == 3
assert len(sib.sch.schemas_data) == 3
def test_remove_single_schema_by_index(sib):
assert sib.sch.remove_single_schema(index=1) is True
assert sib.sch.total_schemas == 2
assert len(sib.sch.schemas_data) == 2
def test_actions_in_single_schema(sib):
assert sib.sch.total_schemas == 2
assert len(sib.sch.schemas_data) == 2
def test_add_schema(sib):
assert len(sib.sch.current_schema.actions_array) == 1
# sia = SingleAction(-1, "virtual action", "virt descr", "<val>", "template <f>", type="single", ui=(("ui", "2+2")))
sia = SingleAction("virtual action", "virt descr", "<val>", "template <f>", mode="single", ui=(("ui", "2+2")))
sib.sch.current_schema.add_action_to_schema(sia)
assert len(sib.sch.current_schema.actions_array) == 2
def test_print_current(sib):
sib.sch.print_current()
def test_print_all(sib):
sib.sch.print_all()
|
StarcoderdataPython
|
4806755
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Step 3X Preprocessing: Feature Selection
License_info: ISC
ISC License
Copyright (c) 2020, <NAME>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# Futures
#from __future__ import print_function
# Built-in/Generic Imports
import os
# Libs
import argparse
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from pickle import dump #Save data
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, f1_score, confusion_matrix
# Own modules
#import utils.data_visualization_functions as vis
import utils.data_handling_support_functions as sup
from filepaths import Paths
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \
'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'ISC'
__version__ = '0.2.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Experiental'
register_matplotlib_converters()
#Global settings
np.set_printoptions(precision=3)
#Suppress print out in scientific notiation
np.set_printoptions(suppress=True)
parser = argparse.ArgumentParser(description='Step 3 - Prepare data for machine learning algorithms')
parser.add_argument("-conf", '--config_path', default="config/debug_timedata_omxS30.ini",
help='Configuration file path', required=False)
#parser.add_argument("-i", "--do_inference", action='store_true',
# help="Set inference if only inference and no training")
args = parser.parse_args()
def split_train_validation_data(config_path):
#Load all paths
#paths = Paths(config_path).path
print("=== Split data into training and validation data ===")
conf = sup.load_config(config_path)
features, y, df_y, class_labels = sup.load_features(conf)
#Load training data
#df_X, y, y_classes, df_feature_columns = load_files(paths, do_inference)
X_train, X_val, y_train, y_val = \
train_test_split(features, df_y, random_state=0,
test_size=float(conf['Preparation'].get('test_size')) ,
shuffle=conf['Preparation'].get('shuffle_data')=='True')
print("Total number of samples: {}. X_train: {}, X_test: {}, y_train: {}, y_test: {}".
format(features.shape[0], X_train.shape, X_val.shape, y_train.shape, y_val.shape))
#Check if training and test data have all classes
if len(np.unique(y_train))==1:
raise Exception("y_train only consists one class after train/test split. Please adjust the data.")
if len(np.unique(y_val)) == 1:
raise Exception("y_test only consists one class after train/test split. Please adjust the data.")
# Save results
X_train.to_csv(
os.path.join(conf['Paths'].get('prepared_data_directory'), conf['Preparation'].get('features_out_train')),
sep=';', index=True, header=True)
X_val.to_csv(
os.path.join(conf['Paths'].get('prepared_data_directory'), conf['Preparation'].get('features_out_val')),
sep=';', index=True, header=True)
y_train.to_csv(
os.path.join(conf['Paths'].get('prepared_data_directory'), conf['Preparation'].get('outcomes_out_train')),
sep=';', index=True, header=True)
y_val.to_csv(
os.path.join(conf['Paths'].get('prepared_data_directory'), conf['Preparation'].get('outcomes_out_val')),
sep=';', index=True, header=True)
print("Saved training and validation files.")
if __name__ == "__main__":
split_train_validation_data(args.config_path)
print("=== Program end ===")
|
StarcoderdataPython
|
151673
|
<reponame>chrisseto/pycon2020-big-o-no
import random
from django.db import transaction
from django.core.management.base import BaseCommand, CommandError
from faker import Faker
from app.models import *
class Command(BaseCommand):
help = 'Populate the database will fake data'
def handle(self, *args, **options):
fake = Faker()
# Generate fake data and hold it in memory.
# Selecting random entries from these lists will be faster than
# generating a new entry every time
words = [fake.word() for _ in range(75)]
names = [fake.name() for _ in range(100)]
emails = [fake.email() for _ in range(100)]
sentences = [fake.sentence() for _ in range(75)]
paragraphs = [fake.paragraph() for _ in range(50)]
with transaction.atomic():
tags = Tag.objects.bulk_create([
Tag(text=word) for word in words
])
users = User.objects.bulk_create([
User(name=random.choice(names))
for _ in range(500)
])
Email.objects.bulk_create([
Email(email=random.choice(emails), user=user)
for user in users
for _ in range(random.randint(1, 10))
])
projects = Project.objects.bulk_create([
Project(title=random.choice(sentences), body=random.choice(paragraphs))
for _ in range(1000)
])
Project.tags.through.objects.bulk_create([
Project.tags.through(project=project, tag=tag)
for project in projects
for tag in random.sample(tags, random.randint(0, 50))
])
Contributor.objects.bulk_create([
Contributor(project=project, user=user)
for project in projects
for user in random.sample(users, random.randint(1, 250))
])
|
StarcoderdataPython
|
84548
|
# BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to the "Jones" sequence file format
"""
__author__ = "<NAME>"
__date__ = "13 Sep 2016"
__version__ = "0.1"
from conkit.io.a2m import A2mParser
from conkit.misc import deprecate
@deprecate("0.11", msg="Use A2mParser instead")
class JonesParser(A2mParser):
"""Parser class for Jones sequence files
This format is a "new" definition of sequence-only records.
It assumes that there are no comments, headers or any other
data in the file.
The only information present are sequences, whereby one sequence
is represented in a single line!
"""
pass
|
StarcoderdataPython
|
3272244
|
from odynn import optim, utils
import pandas as pd
import seaborn as sns
import pylab as plt
import numpy as np
from odynn.models import cfg_model
from odynn import neuron as nr
from odynn import nsimul as ns
from sklearn.decomposition import PCA
def corr(df):
corr = df.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, cmap=cmap, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
def scatt(df):
f, ax = plt.subplots(figsize=(6.5, 6.5))
sns.despine(f, left=True, bottom=True)
sns.scatterplot(x="loss", y="n__tau",
hue="rho_ca",
palette="autumn", linewidth=0,
data=df, ax=ax)
plt.show()
def violin(df):
# Use cubehelix to get a custom sequential palette
# pal = sns.cubehelix_palette(p, rot=-.5, dark=.3)
# Show each distribution with both violins and points
sns.violinplot(data=df, inner="points")
plt.show()
def get_df(dir):
dic = optim.get_vars(dir)
return pd.DataFrame.from_dict(dic)
def real_std(df):
df = df.copy()
mdps = [col for col in df.columns if 'mdp' in col or 'E' in col]
df = df.drop(columns=mdps)
variation = df.std() / df.mean()
d = {'Variation': abs(variation.values),
'Parameter': df.columns.values}
df2 = pd.DataFrame(d)
df2 = df2.sort_values(['Variation']).reset_index(drop=True)
mx = np.max(d['Variation'])
r = np.array([1., 0., 0.])
g = np.array([0., 1., 0.])
colors = [r * (1. - v / mx) + g * (v / mx) for v in df2['Variation']]
df2.plot.bar(x='Parameter', y='Variation', colors=colors, title='Relative standard deviation')
# ax = sns.barplot(x='Parameter', y='Variation', data=df2, palette=colors)
# plt.yscale('log')
plt.show()
def sigm():
def plot_sigm(pts, scale, col='k'):
plt.plot(pts, 1 / (1 + sp.exp((-30. - pts) / scale)), col, label='scale=%s'%scale)
import scipy as sp
pts = sp.arange(-12000, 20, 0.5)
# plot_sigm(pts, -1, col='#000000')
# plot_sigm(pts, -3, col='#440000')
# plot_sigm(pts, -10, col='#880000')
# plot_sigm(pts, -30, col='#bb0000')
# plot_sigm(pts, -100, col='#ff0000')
plot_sigm(pts, 1, col='#000000')
plot_sigm(pts, 3, col='#004400')
plot_sigm(pts, 10, col='#008800')
plot_sigm(pts, 30, col='#00bb00')
plot_sigm(pts, 1000, col='#00ff00')
plt.legend()
plt.title('Influence of $V_{scale}$ on the rate dynamics')
plt.show()
exit(0)
def table():
import re
neur = cfg_model.NEURON_MODEL
from odynn.models import celeg
dir = utils.set_dir('Integcomp_volt_mod3dt0.1-YES')
best = optim.get_best_result(dir)
for k, v in neur.default_params.items():
v = neur._constraints_dic.get(k, ['-inf', 'inf'])
u = ''
if 'tau' in k:
u = 'ms'
elif 'scale' in k or 'mdp' in k or 'E' in k:
u = 'mV'
elif 'g' in k:
u = 'mS/cm$^2$'
elif k == 'C_m':
u = '$\mu$F/cm$^2$'
else:
u = 'none'
tp = '%s &&& %s & %s&%s&%s&%s \\\\\n \\hline' % (k, v[0], v[1], u, cfg_model.NEURON_MODEL.default_params[k], best[k])
tp = re.sub('(.)__(.*) (&.*&.*&.*&.*&)', '\g<2>_\g<1> \g<3>', tp)
tp = tp.replace('inf', '$\\infty$')
tp = re.sub('scale_(.)', '$V_{scale}^\g<1>$', tp)
tp = re.sub('mdp_(.)', '$V_{mdp}^\g<1>$', tp)
tp = re.sub('tau_(.)', '$\\ tau^\g<1>$', tp)
tp = re.sub('E_(..?)', '$E_{\g<1>}$', tp)
tp = tp.replace('\\ tau', '\\tau')
tp = re.sub('g_([^ ]*) +', '$g_{\g<1>}$ ', tp)
tp = tp.replace('rho_ca', '$\\rho_{Ca}$')
tp = tp.replace('decay_ca', '$\\tau_{Ca}$')
tp = tp.replace('C_m', '$C_m$')
tp = tp.replace('alpha_h', '$\\alpha^h$')
tp = re.sub('(.*tau.*)&&&', '\g<1>&%s&%s&' % (celeg.MIN_TAU, celeg.MAX_TAU), tp)
tp = re.sub('(.*scale.*)&&&', '\g<1>&%s&%s&' % (celeg.MIN_SCALE, celeg.MAX_SCALE), tp)
print(tp)
exit(0)
def hhsimp_box(df):
utils.box(df, ['b', 'g', 'm', 'g', 'm'], ['C_m', 'g_L', 'g_K', 'E_L', 'E_K'])
plt.title('Membrane')
utils.save_show(True, True, 'boxmemb', dpi=300)
plt.subplot(3, 1, 1)
utils.box(df, ['m', '#610395'], ['a__mdp', 'b__mdp'])
plt.title('Midpoint')
plt.subplot(3, 1, 2)
utils.box(df, ['m', '#610395'], ['a__scale', 'b__scale'])
plt.title('Scale')
plt.subplot(3, 1, 3)
utils.box(df, ['m', '#610395'], ['a__tau', 'b__tau'])
plt.yscale('log')
plt.title('Time constant')
plt.tight_layout()
utils.save_show(True, True, 'boxrates', dpi=300)
def leak_box(df):
utils.box(df, ['b', 'g', 'Gold'], ['C_m', 'g_L', 'E_L'])
plt.title('Membrane')
utils.save_show(True, True, 'box1', dpi=300)
if __name__ == '__main__':
from odynn.nsimul import simul
import scipy as sp
t = sp.arange(0., 1200., 0.1)
i = 20. * ((t>400) & (t<800))
simul(t=t, i_inj=i, show=True);exit()
dir = utils.set_dir('Tapwith_dt0.5')
dic = optim.get_vars(dir, loss=False)
# df = pd.DataFrame.from_dict(dic)
# df = df.dropna()
# dfdisp = (df - df.mean()) / df.std()
# plt.plot(dfdisp.transpose())
# utils.save_show(True, True, 'dispreal', dpi=300)
dd = optim.get_vars_all(dir, losses=True)
optim.plot_loss_rate(dd['loss'], dd['rates'], dd['loss_test'], 50, show=True)
from odynn import datas
dic = optim.get_vars(dir, loss=True)
train, test = optim.get_data(dir)
print(dic['loss'])
df = pd.DataFrame(dic['loss'], columns=['loss'])
# df = pd.DataFrame.from_dict(dic)#.head(4)
df = df.sort_values('loss').reset_index(drop=True)
# df = df.dropna()
sns.barplot(x=df.index, y='loss', data=df)
# df.plot.bar(y='loss')
utils.save_show(True, True, 'lossfin_virt', dpi=300);exit()
# df = df[df['loss'] <= np.min(df['loss'])]
# hhsimp_box(df)
# cfg_model.NEURON_MODEL.boxplot_vars(dic, show=True, save=True)
dic = df.to_dict('list')
# dic = collections.OrderedDict(sorted(dic.items(), key=lambda t: t[0]))
# obj = circuit.CircuitTf.create_random(n_neuron=9, syn_keys={(i,i+1):True for i in range(8)}, gap_keys={}, n_rand=50, dt=0.1)
p = optim.get_best_result(dir)
print(p)
# p = {k: v[0] for k,v in p.items()}
for i in range(train[1].shape[-1]):
ns.comp_pars_targ(p, cfg_model.NEURON_MODEL.default_params, dt=train[0][1] - train[0][0], i_inj=train[1][:,i], suffix='virtrain%s'%i, show=True, save=True)
for i in range(test[1].shape[-1]):
ns.comp_pars_targ(p, cfg_model.NEURON_MODEL.default_params, dt=test[0][1] - test[0][0], i_inj=test[1][:,i], suffix='virtest%s'%i, show=True, save=True)
n = optim.get_model(dir)
n.init_params = dic
X = n.calculate(train[1])
Xt = n.calculate(test[1])
for i in range(X.shape[2]):
n.plot_output(train[0], train[1][:,i], X[:,:,i], [train[-1][0][:,i], train[-1][-1][:,i]], save=True, suffix='virtend%s'%i)
# for i in range(X.shape[3]):
# plt.subplot(2, 1, 1)
# plt.plot(train[-1][-1], 'r', label='train data')
# plt.plot(X[:, -1,:, i])
# plt.legend()
# plt.subplot(2, 1, 2)
# plt.plot(test[-1][-1], 'r', label='test data')
# plt.plot(Xt[:, -1,:, i])
# plt.legend()
# utils.save_show(True,True,'best_result%s'%i, dpi=300)
# for i in range(9):
# dicn = {k: v[:,i] for k,v in dic.items()}
# hhmodel.CElegansNeuron.plot_vars(dicn, show=True, save=False)
# scatt(df)
# pca = PCA()
# pca.fit(df)
# for c in pca.components_:
# for i, name in enumerate(df):
# print(name, '%.2f'%c[i])
# plt.plot(pca.explained_variance_ratio_)
# plt.show()
# sns.FacetGrid(data=df, row='C_m')
# plt.show()
# violin(df)
|
StarcoderdataPython
|
3474162
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# B, M, E, S: Beginning, Middle, End, Single 4 tags
import sys,os
import CRFPP
# linear chain CRF model path, need str input, convert unicode to str
pkg_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEFAULT_MODEL = str(os.path.join(pkg_path, "data/seg/data/crf_model"))
class Tokenizer(object):
def __init__(self, model_path = DEFAULT_MODEL):
self.model = CRFPP.Tagger("-m " + model_path)
def seg(self, text):
'''
text: String, text to be segmented;
model: path of pretrained CRFPP model,
'''
segList = []
model = self.model
model.clear()
for char in text.strip(): # char in String
char = char.strip()
if char:
model.add((char + "\to\tB").encode('utf-8'))
model.parse()
size = model.size()
xsize = model.xsize()
word = ""
for i in range(0, size):
for j in range(0, xsize):
char = model.x(i, j).decode('utf-8')
tag = model.y2(i)
if tag == 'B':
word = char
elif tag == 'M':
word += char
elif tag == 'E':
word += char
segList.append(word)
word = ""
else: # tag == 'S'
word = char
segList.append(word)
word = ""
return segList
# Create Instance of a tokenizer
# print (DEFAULT_MODEL)
tk = Tokenizer(DEFAULT_MODEL)
# Global functions for call
seg = tk.seg
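# Added usage sketch (requires the CRFPP bindings and the pretrained model file at
# DEFAULT_MODEL to be present; the sample sentence is illustrative only):
#
#   print(seg(u"今天天气不错"))   # -> a list of segmented words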
|
StarcoderdataPython
|
4993407
|
class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
s = list(s)
vowels = [(i, c) for i, c in enumerate(s) if c in 'aeiouAEIOU']
LV = len(vowels)
for vi, (i, c) in enumerate(vowels):
i2, c2 = vowels[LV-vi-1]
s[i] = c2
return ''.join(s)
print(Solution().reverseVowels('leetcode'))
|
StarcoderdataPython
|
1620190
|
#Create a Persona class whose attributes are "cedula (ID number), nombre (first name), apellido (last name) and edad (age)"
# (define properties to access those attributes). Give it a method to display/print them.
# Create a second class, Profesor, that inherits from Persona. Add a sueldo (salary) attribute (and its property)
# and a function that prints the salary. Create a Persona object and call its functions and properties,
# then create a Profesor object and call its functions and properties as well.
class Persona:
def inicializar(self):
self.Cedula = "402-4563452-0"
self.Nombre = "Gerald"
self.Apellido = "Bautista"
self.Ed = "18"
def mostrar(self):
print(self.Cedula)
print(self.Nombre)
print(self.Apellido)
print(self.Ed)
class Profesor(Persona):
def sueldo(self):
self.Sueldo = 25000
def imprimir(self):
print("sueldo: ", self.Sueldo)
objpersona = Persona()
objpersona.inicializar()
objpersona.mostrar()
print("---------------------------------")
objprofesor = Profesor()
objprofesor.inicializar()
objprofesor.mostrar()
objprofesor.sueldo()
objprofesor.imprimir()
|
StarcoderdataPython
|
5150642
|
class Submarine:
def __init__(self):
self.horizontal_position: int = 0
# depth (inverse axis)
self.vertical_position: int = 0
# navigation of submarine
from submarine_navigation import SubmarineNavigation
self.submarine_navigation = SubmarineNavigation(self)
def forward(self, amount: int):
self.horizontal_position += amount
def down(self, amount: int):
self.vertical_position += amount
def up(self, amount: int):
self.vertical_position -= amount
def get_multiplied_coordinates(self) -> int:
return self.horizontal_position * self.vertical_position
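# Added usage sketch (assumes the submarine_navigation module imported in __init__ is
# importable; the values are illustrative):
#
#   sub = Submarine()
#   sub.forward(5)
#   sub.down(8)
#   sub.up(3)
#   print(sub.get_multiplied_coordinates())   # 5 * (8 - 3) = 25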
|
StarcoderdataPython
|
1763573
|
<reponame>usgin/nrrc-repository
from django.http import HttpResponseNotAllowed, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from metadatadb.proxy import proxyRequest, can_edit, hide_unpublished
def oneFile(req, resourceId, fileName):
allowed = [ 'GET', 'DELETE' ]
if req.method not in allowed:
return HttpResponseNotAllowed(allowed)
def getFile(req, resourceId, fileName):
response = hide_unpublished(req.user, proxyRequest(path='/metadata/record/' + resourceId + '/', method='GET'))
if response.content == '':
return HttpResponseForbidden('You do not have permission to view this resource')
else:
kwargs = {
'path': '/metadata/record/' + resourceId + '/file/' + fileName,
'method': req.method
}
return proxyRequest(**kwargs)
@login_required # Registry tracking required?
def deleteFile(req, resourceId, fileName):
if not can_edit(req.user, resourceId):
return HttpResponseForbidden('You do not have permission to edit this resource')
kwargs = {
'path': '/metadata/record/' + resourceId + '/file/' + fileName,
'method': req.method
}
return proxyRequest(**kwargs)
if req.method == 'GET': return getFile(req, resourceId, fileName)
if req.method == 'DELETE': return deleteFile(req, resourceId, fileName)
|
StarcoderdataPython
|
3420812
|
<filename>sorting/python/selection-sort.py
def sort(arr):
for i in range(len(arr)):
min_idx = i
for j in range(i+1, len(arr)):
if arr[min_idx] > arr[j]:
min_idx = j
arr[i], arr[min_idx] = arr[min_idx], arr[i]
return arr
arr = list(map(int,input("Enter Numbers: ").split()))
print(sort(arr))
|
StarcoderdataPython
|
6568355
|
<filename>src/grocsvs/stages/visualize.py
import collections
import numpy
import os
import pandas
from grocsvs import graphing
from grocsvs import step
from grocsvs import structuralvariants
from grocsvs import utilities
from grocsvs.stages import final_clustering
from grocsvs.stages import genotyping
from grocsvs.stages import call_readclouds
try:
from rpy2 import robjects as ro
from rpy2.robjects import numpy2ri
numpy2ri.activate()
except ImportError, e:
ro = None
class VisualizeStep(step.StepChunk):
@staticmethod
def get_steps(options):
if ro is None:
print(" ** rpy2 not installed correctly; skipping visualization step ** ")
return
edges = load_edges(options)
for cluster in edges["cluster"].unique():
yield VisualizeStep(options, cluster)
def __init__(self, options, cluster):
self.options = options
self.cluster = cluster
def __str__(self):
return ".".join([self.__class__.__name__, str(self.cluster)])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
file_name = "event_{}.pdf".format(self.cluster)
paths = {
"visualization": os.path.join(directory, file_name)
}
return paths
def run(self):
# open(self.outpaths(final=False)["visualization"], "w")
edges = load_edges(self.options)
cluster = edges.loc[edges["cluster"]==self.cluster]
# breakpoints = get_cluster_breakpoints(self.options, self.cluster)
from rpy2.robjects import r
r.pdf(self.outpaths(final=False)["visualization"])
for sample, dataset in sorted(self.options.iter_10xdatasets()):
graphing.plot_frags(cluster, self.options, sample, dataset)
for i, row in cluster.iterrows():
plot_triangles(row, self.options)
# print "::", breakpoints
# graphing.visualize_frag_cluster(breakpoints, self.options)
r["dev.off"]()
def load_edges(options):
clustering_step = final_clustering.FinalClusterSVsStep(options)
edges = pandas.read_table(
clustering_step.outpaths(final=True)["edges"])
return edges
# genotyping_step = genotyping.MergeGenotypesStep(options)
# genotypes = pandas.read_table(
# genotyping_step.outpaths(final=True)["genotypes"])
# return genotypes
def get_cluster_breakpoints(options, cluster):
# genotyping_step = genotyping.GenotypingStep(options)
# genotypes = pandas.read_table(
# genotyping_step.outpaths(final=True)["genotypes"])
clustering_step = final_clustering.FinalClusterSVsStep(options)
edges = pandas.read_table(
clustering_step.outpaths(final=True)["edges"])
clustered_edges = edges.loc[edges["cluster"]==cluster]
breakpoints = []
for row in clustered_edges.itertuples():
breakpoints.append((row.chromx, row.x, row.chromy, row.y, row.orientation))
return breakpoints
def plot_triangles(event, options):
samples_to_mats = collections.OrderedDict()
extend_distance = 100000
startx = event.x - extend_distance
endx = event.x + extend_distance
starty = event.y - extend_distance
endy = event.y + extend_distance
startx = max(0, startx)
starty = max(0, starty)
for sample, dataset in sorted(options.iter_10xdatasets()):
sv = structuralvariants.StructuralVariant(
event.chromx, event.chromy, event.x, event.y, event.orientation)
fragsx = call_readclouds.load_fragments(
options, sample, dataset, event.chromx, startx, endx)
fragsy = call_readclouds.load_fragments(
options, sample, dataset, event.chromy, starty, endy)
_, _, merged_frags, mat, breakpoint = sv.get_sv_info(
fragsx, fragsy, ext_dist=extend_distance, winsize=100)
samples_to_mats[sample.name] = mat
scale = 1e6
cur_max = int(max(mat.max() for mat in samples_to_mats.values()) + 1)
for sample_name, mat in samples_to_mats.items():
sample = options.samples[sample_name]
dataset = sample.get_10x_dataset()
palette = get_palette(mat, 0, cur_max)
mat = convert(mat, palette)
ro.r.layout(numpy.array([[1,2],[0,0]]), widths=[9,1], heights=[10,0])
oldpar = ro.r.par(mar=numpy.array([5,5,3,0]))
main = "{} {}:{:,}::{}:{:,}{}".format(sample_name, event.chromx, event.x, event.chromy, event.y, event.orientation)
utilities.plot_matrix_as_image(mat, startx/scale, starty/scale, endx/scale, endy/scale, main=main,
xlab="{} (MB)".format(event.chromx), ylab="{} (MB)".format(event.chromy))
p = [ro.r.rgb(*palette(i)) for i in range(cur_max)]
ro.r.par(mar=numpy.array([5,2,3,0.5]))
color_bar(p, cur_max, nticks=5)
def color_bar(lut, max_, nticks=11, ticks=None, title=""):
min_ = 0
scale = float((len(lut)-1))/(max_-min_)
if ticks is None:
tick_max = max_/10*10
ticks = numpy.linspace(min_, tick_max, nticks)
ro.r.plot(numpy.array([0,10]), numpy.array([min_,max_]), type='n', bty='n', xaxt='n', xlab='', yaxt='n', ylab='', main=title)
ro.r.axis(2, ticks, las=1)
for i in range(1, len(lut)):
y = (i-1)/scale + min_
ro.r.rect(0,y,10,(y+1/scale)*1.025, col=lut[i], border=ro.NA_Logical)
def convert(matrix, fn):
converted = []
for row in range(matrix.shape[0]):
currow = []
for col in range(matrix.shape[1]):
currow.append(fn(matrix[row,col]))
converted.append(currow)
return numpy.array(converted)
def get_palette(mat=None, min_=None, max_=None):
if max_ is None:
max_ = mat.max()
if min_ is None:
min_ = mat.min()
    print(min_, max_)
diff = float(max_ - min_ - 10)
def color(x):
if x < 10:
# return (0.8,0.8,0.8)
v = (x + 5) / 15.0
return (v,v,1)
frac = (x-min_-10)/diff
return (1, 1-frac, 1-frac)
return color
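# A minimal sketch (not part of the original module) of how get_palette() and
# convert() are expected to fit together: counts below 10 map to bluish shades,
# larger counts fade from white toward red. The toy matrix below is hypothetical.
if __name__ == "__main__":
    toy = numpy.array([[0, 5], [15, 30]])
    palette = get_palette(toy, min_=0, max_=30)
    rgb = convert(toy, palette)     # shape (2, 2, 3): one RGB triple per cell
    print(rgb.shape, rgb[1, 1])     # the largest count maps to the most saturated red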
|
StarcoderdataPython
|
3560009
|
<filename>mlxtend/mlxtend/evaluate/bootstrap_outofbag.py
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
#
# Bootstrap functions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
class BootstrapOutOfBag(object):
"""
Parameters
----------
n_splits : int (default=200)
Number of bootstrap iterations.
Must be larger than 1.
random_seed : int (default=None)
If int, random_seed is the seed used by
the random number generator.
Returns
-------
train_idx : ndarray
The training set indices for that split.
test_idx : ndarray
The testing set indices for that split.
Examples
-----------
For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/
"""
def __init__(self, n_splits=200, random_seed=None):
self.random_seed = random_seed
if not isinstance(n_splits, int) or n_splits < 1:
            raise ValueError('Number of splits must be a positive integer.')
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
"""
y : array-like or None (default: None)
Argument is not used and only included as parameter
for compatibility, similar to `KFold` in scikit-learn.
groups : array-like or None (default: None)
Argument is not used and only included as parameter
for compatibility, similar to `KFold` in scikit-learn.
"""
rng = np.random.RandomState(self.random_seed)
sample_idx = np.arange(X.shape[0])
set_idx = set(sample_idx)
for _ in range(self.n_splits):
train_idx = rng.choice(sample_idx,
size=sample_idx.shape[0],
replace=True)
test_idx = np.array(list(set_idx - set(train_idx)))
yield train_idx, test_idx
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility with scikit-learn.
y : object
Always ignored, exists for compatibility with scikit-learn.
groups : object
Always ignored, exists for compatibility with scikit-learn.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
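# A minimal usage sketch (an addition for illustration, not part of the
# original mlxtend file). It draws bootstrap samples from a toy array and
# checks that every out-of-bag test set is disjoint from its in-bag sample.
if __name__ == "__main__":
    X = np.arange(10).reshape(10, 1)                 # toy data: 10 samples
    oob = BootstrapOutOfBag(n_splits=3, random_seed=123)
    for train_idx, test_idx in oob.split(X):
        assert set(train_idx).isdisjoint(test_idx)   # OOB indices never appear in-bag
        print(len(train_idx), len(test_idx))         # 10 in-bag, roughly 3-4 out-of-bag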
|
StarcoderdataPython
|
6677463
|
<filename>src/probnum/utils/__init__.py
from .argutils import *
from .arrayutils import *
from .fctutils import *
from .randomutils import *
# Public classes and functions. Order is reflected in documentation.
__all__ = [
"atleast_1d",
"atleast_2d",
"as_colvec",
"as_numpy_scalar",
"as_random_state",
"as_shape",
"assert_is_1d_ndarray",
"assert_is_2d_ndarray",
"assert_evaluates_to_scalar",
"derive_random_seed",
]
|
StarcoderdataPython
|
3378998
|
""" Classes and functions for configuring BIG-IQ """
# Copyright 2014 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=star-args
import json
import logging
import requests
LOG = logging.getLogger(__name__)
class BIGIQ(object):
"""An interface to a single BIG-IQ
This class allows someone to make custom HTTP requests to
a BIG-IQ or use existing well known ones as documented
in our APIs.
Every HTTP request made to a BIG-IQ returns the body of
    the response, which should contain the JSON representation
    of the resource addressed by the request.
"""
# URI segments for top-level BIG-IQ namespaces
MGMT_ENDPOINT_URI_SEGMENT = "mgmt"
NS_CM_URI_SEGMENT = "cm"
NS_SHARED_URI_SEGMENT = "shared"
# URI segments for any BIG-IQ module
NS_INDEX_URI_SEGMENT = "index"
NS_RESOLVER_URI_SEGMENT = "resolver"
SHARED_CONFIG_URI_SEGMENT = "config"
UTILITY_URI_SEGMENT_EXAMPLE = 'example'
# Query component identifiers used when constructing an indexer query
CONFIG_QUERY_KEY_INFLATE = 'inflate'
CONFIG_QUERY_KEY_REF_KIND = 'referenceKind'
CONFIG_QUERY_KEY_REF_LINK = 'referenceLink'
CONFIG_QUERY_KEY_REF_METHOD = 'referenceMethod'
REF_METHOD_REF_ANY = 'referenceAny'
REF_METHOD_REF_KIND = 'resourceReferencesKind'
REF_METHOD_REF_RESOURCE = 'kindReferencesResource'
# URI segments for BIG-IQ Cloud module
SUB_CM_NS_CLOUD_URI_SEGMENT = "cloud"
CLOUD_CONNECTORS_URI_SEGMENT = "connectors"
CLOUD_IAPP_URI_SEGMENTS = "iapp"
CLOUD_NODES_URI_SEGMENT = "nodes"
CLOUD_PROVIDER_URI_SEGMENT = "provider"
CLOUD_PROVIDERS_URI_SEGMENT = "providers"
CLOUD_SERVICES_URI_SEGMENT = "services"
CLOUD_TEMPLATES_URI_SEGMENT = "templates"
CLOUD_TENANTS_URI_SEGMENT = "tenants"
# Cloud Connectors
CC_TYPE_EC2 = 'ec2'
CC_TYPE_LOCAL = 'local'
CC_TYPE_OPENSTACK = 'openstack'
CC_TYPE_VMWARE_NSX = 'nsx'
CC_TYPE_VMWARE_VSHIELD = 'vmware'
# Constants used for constructing URLs
PATH_SEPARATOR = "/"
SCHEME_HTTPS = "https"
SCHEME_SEPARATOR = "://"
QUERY_COMPONENT_KV_SEPARATOR = "=" # key value separator
QUERY_COMPONENT_KVP_SEPARATOR = "&" # key value pair separator
QUERY_COMPONENT_STARTER = "?"
QUERY_COMPONENT_TERMINATOR = "#"
def __init__(self, hostname, username, password):
"""Creates an instance of a BIG-IQ
:param string hostname: The hostname of the BIG-IQ
:param string username: The Administrator user name
:param string password: The <PASSWORD>
"""
self.hostname = hostname
self.username = username
self.password = password
# Setup our HTTP session to the BIG-IQ
self.http_session = requests.Session()
self.http_session.auth = (self.username, self.password)
self.http_session.verify = False
self.http_session.headers.update({'Content-Type': 'application/json'})
# If we are able to successfully query the echo worker
# we consider ourselves connected
url = 'https://' + self.hostname + '/mgmt/shared/echo'
self.http_session.get(url).raise_for_status()
def delete(self, url):
"""Makes a HTTP DELETE request
Makes a HTTP DELETE request to the argument provided to the 'url'
parameter using the HTTP session previously established when
the instance of this BIGIQ type was created. Thus the URL is
presumed to be a resource on the BIG-IQ.
:param string url: The URL to perform a HTTP DELETE on
"""
response = self.http_session.delete(url)
response.raise_for_status()
# no json to parse on delete response
return
def get(self, url):
"""Makes a HTTP GET request
Makes a HTTP GET request to the argument provided to the 'url'
parameter using the HTTP session previously established when
the instance of this BIGIQ type was created. Thus the URL is
presumed to be a resource on the BIG-IQ.
:param string url: The URL to perform a HTTP GET on
:return: The JSON response body
"""
response = self.http_session.get(url)
response.raise_for_status()
return response.json()
def post(self, url, body):
"""Makes a HTTP POST request
Makes a HTTP POST request to the argument provided to the 'url'
parameter using the HTTP session previously established when
the instance of this BIGIQ type was created. Thus the URL is
presumed to be a resource on the BIG-IQ. The body posted is
contained in the parameter 'body'. It will be serialized to
JSON inside this method.
:param string url: The URL to perform a HTTP POST on
:param object body: An object that will be serialized to JSON
for the body
:return: The JSON response body
"""
response = self.http_session.post(url, json.dumps(body))
response.raise_for_status()
return response.json()
def put(self, url, body):
"""Makes a HTTP PUT request
Makes a HTTP PUT request to the argument provided to the 'url'
parameter using the HTTP session previously established when
the instance of this BIGIQ type was created. Thus the URL is
presumed to be a resource on the BIG-IQ. The body posted is
contained in the parameter 'body'. It will be serialized to
JSON inside this method.
:param string url: The URL to perform a HTTP PUT on
:param object body: An object that will be serialized to JSON
for the body
:return: The JSON response body
"""
response = self.http_session.put(url, json.dumps(body))
response.raise_for_status()
return response.json()
def build_bigiq_url(self, uri_path, query_component=None):
"""Builds a URL to a resource on the BIG-IQ
The URL is that of a 'https' scheme. The URI path is presumed
to be properly formed. The query component is presumed to be
properly formed.
:param string uri_path: The path of the URL
:param string query_component: The query component of the URI.
:return: URL
"""
url = BIGIQ.SCHEME_HTTPS + BIGIQ.SCHEME_SEPARATOR + \
self.hostname + uri_path
if query_component:
url += query_component
return url
@staticmethod
def build_remote_uri_path(*uri_segments):
"""Builds a URI path to a remote resource on a BIG-IQ from URI segments
URI segments can include leading or trailing path separators. If
the URI segment doesn't include a leading path separator one is
added. If the URI segment does include a trailing path separator
it is removed.
        URI segments in the list should be strings. The objects provided in
        uri_segments are not type checked, so passing non-string objects may
        result in unexpected behavior or an error.
The empty string will be returned if the list of URI segments is
empty.
The URI path returned will be prefixed with the 'mgmt' URI segment.
:param list uri_segments: List of URI segments of object type string.
:return: URI path
"""
uri_path = ""
if not uri_segments:
return uri_path
for uri_segment in uri_segments:
# Skip the URI segment if it is empty
if not uri_segment:
continue
# Add the URI segment with a leading '/' if it doesn't have one
if uri_segment[0] == BIGIQ.PATH_SEPARATOR:
uri_path += uri_segment
else:
uri_path += BIGIQ.PATH_SEPARATOR + uri_segment
# Chop off the trailing '/' on the URI segment if it had one
if uri_path[-1] == BIGIQ.PATH_SEPARATOR:
uri_path = uri_path[:-1]
start_path = BIGIQ.PATH_SEPARATOR + BIGIQ.MGMT_ENDPOINT_URI_SEGMENT
if uri_path and not uri_path.startswith(start_path):
uri_path = start_path + uri_path
return uri_path
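    # Illustration (added here, not in the original module) of the
    # normalization build_remote_uri_path() performs: leading separators are
    # added where missing, trailing ones are stripped, and the whole path is
    # prefixed with the 'mgmt' segment, e.g.
    #   BIGIQ.build_remote_uri_path('shared/', '/index', 'config')
    #   -> '/mgmt/shared/index/config'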
@staticmethod
def build_query_component(**key_value_pairs):
"""Builds a query component to be used in a URL
        Takes a dictionary and builds a query string from its key/value
        pairs (KvPs).
        :param dict key_value_pairs: The KvPs to turn into the query component
        :return: string that can be used as the query component in a URL
"""
if not key_value_pairs:
return ""
query_component = BIGIQ.QUERY_COMPONENT_STARTER
for key, value in key_value_pairs.items():
# Skip the key if it is empty
if not key:
continue
add_component = key + BIGIQ.QUERY_COMPONENT_KV_SEPARATOR + \
value + BIGIQ.QUERY_COMPONENT_KVP_SEPARATOR
# Add the key value pair to the query string
query_component += add_component
# Chop off the trailing '&' on the query component
query_component = query_component[:-1]
# Terminate the query component with the '#' character
query_component += BIGIQ.QUERY_COMPONENT_TERMINATOR
return query_component
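    # Illustration (added here, not in the original module; the kind string is
    # just an example value) of the query component this helper returns; note
    # the trailing '#' terminator:
    #   BIGIQ.build_query_component(**{'inflate': 'false', 'referenceKind': 'cm:cloud:tenant'})
    #   -> '?inflate=false&referenceKind=cm:cloud:tenant#'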
def get_related(self, kind, self_link, inflate=False):
"""Makes an indexer query to get all kinds related by a reference
:param string kind: The kind of object we are interested in
:param string self_link: The 'selfLink' property on the resource
referencing the objects
:param boolean inflate: Whether the results should be inflated
or not (default is 'False')
:return: List of all referenced objects serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_SHARED_URI_SEGMENT,
BIGIQ.NS_INDEX_URI_SEGMENT,
BIGIQ.SHARED_CONFIG_URI_SEGMENT)
query_component = BIGIQ.build_query_component(**{
BIGIQ.CONFIG_QUERY_KEY_REF_KIND: kind,
BIGIQ.CONFIG_QUERY_KEY_REF_LINK: self_link,
BIGIQ.CONFIG_QUERY_KEY_REF_METHOD:
BIGIQ.REF_METHOD_REF_KIND,
BIGIQ.CONFIG_QUERY_KEY_INFLATE: '%s' % inflate})
url = self.build_bigiq_url(uri_path, query_component)
response = self.get(url)
return response.get('items', [])
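    # Sketch of the request get_related() ends up issuing (an illustration,
    # not part of the original module): for a kind and a resource selfLink it
    # queries the shared config indexer, e.g.
    #   https://<hostname>/mgmt/shared/index/config?referenceKind=<kind>&referenceLink=<selfLink>&referenceMethod=resourceReferencesKind&inflate=False#
    # and returns the 'items' list from the JSON response.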
def get_resource_example(self, uri_path):
"""Gets the example of a resource
:param string uri_path: The resource to get the example of
:return: Example of the resource serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
uri_path, BIGIQ.UTILITY_URI_SEGMENT_EXAMPLE)
url = self.build_bigiq_url(uri_path)
return self.get(url)
def get_cloud_connectors(self, connector_type):
"""Gets all the cloud connectors of a specific type
:param string connector_type: The type of the connector to get
(e.g. 'openstack', 'ec2', etc.)
:return: List of connectors serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_CONNECTORS_URI_SEGMENT,
connector_type)
url = self.build_bigiq_url(uri_path)
try:
response = self.get(url)
except requests.exceptions.HTTPError as httperr:
if '404' in str(httperr):
LOG.debug("No cloud connectors found: %s" % str(httperr))
return []
else:
LOG.error("ERROR: getting cloud connectors")
raise
return response.get('items', [])
def post_cloud_connector(self, connector_type, connector):
"""Creates a cloud connector of a specific type
:param string connector_type: The type of the connector to create
(e.g. 'openstack', 'ec2', etc.)
:param dict connector: A dictionary representing the connector to be
used in the POST body
:return: Created connector serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_CONNECTORS_URI_SEGMENT,
connector_type)
url = self.build_bigiq_url(uri_path)
LOG.debug("Posting Cloud Connector, URL: %s body: %s"
% (url, connector))
return self.post(url, connector)
def post_cloud_device(
self, ip_address, username, password, auto_update=True):
"""Adds a cloud device for management
:param string ip_address: The address of the device
:param string username: The username to use when authenticating the
device
:param string password: The password to use when authenticating the
device
:param boolean auto_update: Whether the device should be updated
when managed (defaults to True)
:return: The managed device serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_SHARED_URI_SEGMENT,
BIGIQ.NS_RESOLVER_URI_SEGMENT,
'device-groups',
'cm-cloud-managed-devices',
'devices')
url = self.build_bigiq_url(uri_path)
body = {}
body['address'] = ip_address
body['userName'] = username
body['password'] = password
body['rootUser'] = 'root'
body['rootPassword'] = '<PASSWORD>'
body['automaticallyUpdateFramework'] = auto_update
LOG.debug("Posting Cloud Device, URL: %s body: %s"
% (url, body))
return self.post(url, body)
def get_provider_template(self, provider_template_name):
"""Get a provider template
:param string provider_template_name: The name of the provider
template to get
:return: The provider template serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_PROVIDER_URI_SEGMENT,
BIGIQ.CLOUD_TEMPLATES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS,
provider_template_name)
url = self.build_bigiq_url(uri_path)
return self.get(url)
def post_provider_template(self, provider_template):
"""Creates a provider template
:param dict provider_template: A dictionary representing the
provider template to be used in the POST body
:return: Created provider template serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_PROVIDER_URI_SEGMENT,
BIGIQ.CLOUD_TEMPLATES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS)
url = self.build_bigiq_url(uri_path)
return self.post(url, provider_template)
def post_tenant(self, tenant):
"""Creates a tenant
        :param dict tenant: A dictionary representing the tenant to be
used in the POST body
:return: Created tenant serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_TENANTS_URI_SEGMENT)
url = self.build_bigiq_url(uri_path)
return self.post(url, tenant)
def delete_tenant_service(self, tenant_name, service_name):
"""Deletes a tenant service
:param string tenant_name: The name of the tenant to delete a
service for
:param string service_name: The name of the service to delete
:return: The deleted tenant service serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_TENANTS_URI_SEGMENT,
tenant_name,
BIGIQ.CLOUD_SERVICES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS,
service_name)
url = self.build_bigiq_url(uri_path)
return self.delete(url)
def get_tenant_service(self, tenant_name, service_name):
"""Gets a tenant service
:param string tenant_name: The name of the tenant to get a service for
:param string service_name: The name of the service to get
:return: The tenant service serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_TENANTS_URI_SEGMENT,
tenant_name,
BIGIQ.CLOUD_SERVICES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS,
service_name)
url = self.build_bigiq_url(uri_path)
return self.get(url)
def post_tenant_service(self, tenant_name, service):
"""Creates a tenant service
        :param string tenant_name: The name of the tenant to create a
service for
:param dict service: A dictionary representing the tenant service
to be used in the POST body
:return: Created tenant service serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_TENANTS_URI_SEGMENT,
tenant_name,
BIGIQ.CLOUD_SERVICES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS)
url = self.build_bigiq_url(uri_path)
return self.post(url, service)
def put_tenant_service(self, tenant_name, service_name, service):
"""Updates a tenant service by full replacement
:param string tenant_name: The name of the tenant to update a
service for
:param string service_name: The name of the service to update
:param dict service: A dictionary representing the tenant service
to be used in the PUT body
:return: Updated tenant service serialized to JSON
"""
uri_path = BIGIQ.build_remote_uri_path(
BIGIQ.NS_CM_URI_SEGMENT,
BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT,
BIGIQ.CLOUD_TENANTS_URI_SEGMENT,
tenant_name,
BIGIQ.CLOUD_SERVICES_URI_SEGMENT,
BIGIQ.CLOUD_IAPP_URI_SEGMENTS,
service_name)
url = self.build_bigiq_url(uri_path)
return self.put(url, service)
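# A minimal usage sketch (the hostname and credentials below are placeholders,
# not values from the original module). Constructing a BIGIQ verifies
# connectivity against the echo worker, after which the REST helpers above can
# be used directly.
if __name__ == "__main__":
    bigiq = BIGIQ('bigiq.example.com', 'admin', 'secret')   # raises if the echo check fails
    for connector in bigiq.get_cloud_connectors(BIGIQ.CC_TYPE_OPENSTACK):
        LOG.info("Found OpenStack connector: %s", connector.get('name'))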
|
StarcoderdataPython
|
4938794
|
<reponame>GQAssurance/selenium
SE_VERSION = "4.0.0-alpha-3"
|
StarcoderdataPython
|
12804803
|
from samplemodule import message
def test_message():
assert message == 'Hello World'
|
StarcoderdataPython
|