id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
209933
|
<reponame>billyrrr/onto
from onto.attrs import attrs
from onto.models.base import Serializable as ValueObject
class AwardPool(ValueObject):
award_pool_id: int = attrs.doc_id
awards = attrs.set(attrs.embed('Award'))
class Award(ValueObject):
award_id: int = attrs.doc_id
probability: int = attrs.nothing
class DrawLotteryContext:
mt_city_info = attrs.embed('MtCity')
game_score: str = attrs.nothing
lon: str = attrs.nothing
lat: str = attrs.nothing
user_id: int = attrs.nothing
lottery_id: int = attrs.nothing
@lottery_id.getter
def lottery_id(self):
return 123
class IssueResponse(ValueObject):
code: int = attrs.nothing
prize_info = attrs.embed('PrizeInfo') # TODO: PrizeInfo
|
StarcoderdataPython
|
384024
|
<reponame>neuro-inc/neuro-cli
from datetime import datetime, timezone
from decimal import Decimal
from typing import AsyncIterator, Callable
import pytest
from aiohttp import web
from yarl import URL
from neuro_sdk import Action, Client, Permission, Quota, ResourceNotFound
from tests import _TestServerFactory
_MakeClient = Callable[..., Client]
@pytest.fixture()
async def mocked_share_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
async def handler(request: web.Request) -> web.Response:
data = await request.json()
assert data[0]["action"] in [item.value for item in Action]
data[0]["action"] = Action.MANAGE.value
return web.json_response(data, status=web.HTTPCreated.status_code)
app = web.Application()
app.router.add_post("/users/bill/permissions", handler)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/"))
yield client
await client.close()
@pytest.fixture()
async def mocked_revoke_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
async def handler(request: web.Request) -> web.Response:
assert "uri" in request.query
raise web.HTTPNoContent()
app = web.Application()
app.router.add_delete("/users/bill/permissions", handler)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/"))
yield client
await client.close()
@pytest.fixture()
async def mocked_add_role_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
async def handler(request: web.Request) -> web.Response:
data = await request.json()
assert data["name"].startswith("mycompany/")
raise web.HTTPCreated()
app = web.Application()
app.router.add_post("/users", handler)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/"))
yield client
await client.close()
@pytest.fixture()
async def mocked_remove_role_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
async def handler(request: web.Request) -> web.Response:
assert request.match_info["name"].startswith("mycompany:")
raise web.HTTPNoContent()
app = web.Application()
app.router.add_delete("/users/{name}", handler)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/"))
yield client
await client.close()
@pytest.fixture()
async def mocked_get_quota_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
date = datetime.now(timezone.utc)
async def handle_get_cluster_user(request: web.Request) -> web.StreamResponse:
data = {
"user_name": "denis",
"role": "admin",
"user_info": {
"first_name": "denis",
"last_name": "admin",
"email": "<EMAIL>",
"created_at": date.isoformat(),
},
"balance": {
"credits": "500",
"spent_credits": "10",
},
"quota": {"total_running_jobs": 10},
}
return web.json_response(data)
app = web.Application()
app.router.add_get(
"/apis/admin/v1/clusters/{cluster_name}/users/{username}",
handle_get_cluster_user,
)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/api/v1"))
yield client
await client.close()
@pytest.fixture()
async def mocked_get_subroles_client(
aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> AsyncIterator[Client]:
async def handle_get_subroles(request: web.Request) -> web.StreamResponse:
username = request.match_info["username"]
data = {
"subroles": [f"{username}/sub1", f"{username}/sub2", f"{username}/sub3"]
}
return web.json_response(data)
app = web.Application()
app.router.add_get(
"/api/v1/users/{username}/subroles",
handle_get_subroles,
)
srv = await aiohttp_server(app)
client = make_client(srv.make_url("/api/v1"))
yield client
await client.close()
class TestUsers:
async def test_get_quota(self, mocked_get_quota_client: Client) -> None:
res = await mocked_get_quota_client.users.get_quota()
assert res == Quota(credits=Decimal("500"), total_running_jobs=10)
async def test_get_quota_adminless(self, make_client: _MakeClient) -> None:
async with make_client("https://dev.example.com", admin_url="") as client:
quota = await client.users.get_quota()
assert quota.credits is None
assert quota.total_running_jobs is None
async def test_share_unknown_user(self, mocked_share_client: Client) -> None:
with pytest.raises(ResourceNotFound):
await mocked_share_client.users.share(
user="not-exists",
permission=Permission(URL("storage://bob/resource"), Action.READ),
)
async def test_share_invalid_name(self, mocked_share_client: Client) -> None:
with pytest.raises(ValueError):
await mocked_share_client.users.share(
user="mycompany/team:role",
permission=Permission(URL("storage://bob/resource"), Action.READ),
)
async def test_correct_share(self, mocked_share_client: Client) -> None:
ret = await mocked_share_client.users.share(
user="bill",
permission=Permission(URL("storage://bob/resource"), Action.READ),
)
assert ret == Permission(URL("storage://bob/resource"), Action.MANAGE)
async def test_revoke_unknown_user(self, mocked_revoke_client: Client) -> None:
with pytest.raises(ResourceNotFound):
await mocked_revoke_client.users.revoke(
user="not-exists", uri=URL("storage://bob/resource")
)
async def test_revoke_invalid_name(self, mocked_revoke_client: Client) -> None:
with pytest.raises(ValueError):
await mocked_revoke_client.users.revoke(
user="mycompany/team:role", uri=URL("storage://bob/resource")
)
async def test_correct_revoke(self, mocked_revoke_client: Client) -> None:
ret = await mocked_revoke_client.users.revoke(
user="bill", uri=URL("storage://bob/resource")
)
assert ret is None # at this moment no result
async def test_add_role(self, mocked_add_role_client: Client) -> None:
ret = await mocked_add_role_client.users.add("mycompany/team/role")
assert ret is None # at this moment no result
async def test_remove_role(self, mocked_remove_role_client: Client) -> None:
ret = await mocked_remove_role_client.users.remove("mycompany/team/role")
assert ret is None # at this moment no result
async def test_get_subroles(self, mocked_get_subroles_client: Client) -> None:
res = await mocked_get_subroles_client.users.get_subroles("test")
assert set(res) == {"test/sub1", "test/sub2", "test/sub3"}
async def test_remove_role_invalid_name(
self, mocked_remove_role_client: Client
) -> None:
with pytest.raises(ValueError):
await mocked_remove_role_client.users.remove("mycompany/team:role")
|
StarcoderdataPython
|
3315407
|
from copy import deepcopy
from logging import getLogger
import joblib
import numpy as np
import sklearn
import xarray as xr
from replay_trajectory_classification.bins import (atleast_2d, get_centers,
get_grid, get_track_grid,
get_track_interior)
from replay_trajectory_classification.core import (_acausal_decode,
_causal_decode, mask,
scaled_likelihood)
from replay_trajectory_classification.initial_conditions import \
uniform_on_track
from replay_trajectory_classification.misc import NumbaKDE
from replay_trajectory_classification.multiunit_likelihood import (
estimate_multiunit_likelihood, fit_multiunit_likelihood)
from replay_trajectory_classification.spiking_likelihood import (
estimate_place_fields, estimate_spiking_likelihood)
from replay_trajectory_classification.state_transition import \
CONTINUOUS_TRANSITIONS
from sklearn.base import BaseEstimator
logger = getLogger(__name__)
sklearn.set_config(print_changed_only=False)
_DEFAULT_CLUSTERLESS_MODEL_KWARGS = dict(
bandwidth=np.array([24.0, 24.0, 24.0, 24.0, 6.0, 6.0]))
_DEFAULT_TRANSITIONS = ['random_walk', 'uniform', 'identity']
class _DecoderBase(BaseEstimator):
def __init__(self, place_bin_size=2.0, replay_speed=40, movement_var=0.05,
position_range=None, transition_type='random_walk',
initial_conditions_type='uniform_on_track',
infer_track_interior=True):
self.place_bin_size = place_bin_size
self.replay_speed = replay_speed
self.movement_var = movement_var
self.position_range = position_range
self.transition_type = transition_type
self.initial_conditions_type = initial_conditions_type
self.infer_track_interior = infer_track_interior
if 2 * np.sqrt(replay_speed * movement_var) < place_bin_size:
logger.warning('Place bin size is too small for a random walk '
'continuous state transition')
def fit_place_grid(self, position, track_graph=None,
edge_order=None, edge_spacing=15,
infer_track_interior=True, is_track_interior=None):
if track_graph is None:
(self.edges_, self.place_bin_edges_, self.place_bin_centers_,
self.centers_shape_) = get_grid(
position, self.place_bin_size, self.position_range,
self.infer_track_interior)
self.infer_track_interior = infer_track_interior
if is_track_interior is None and self.infer_track_interior:
self.is_track_interior_ = get_track_interior(
position, self.edges_)
elif is_track_interior is None and not self.infer_track_interior:
self.is_track_interior_ = np.ones(
                    self.centers_shape_, dtype=bool)
else:
(
self.place_bin_centers_,
self.place_bin_edges_,
self.is_track_interior_,
self.distance_between_nodes_,
self.centers_shape_,
self.edges_,
self.track_graph_with_bin_centers_edges_,
self.original_nodes_df_,
self.place_bin_edges_nodes_df_,
self.place_bin_centers_nodes_df_,
self.nodes_df_
) = get_track_grid(track_graph, edge_order,
edge_spacing, self.place_bin_size)
def fit_initial_conditions(self, position=None):
logger.info('Fitting initial conditions...')
self.initial_conditions_ = uniform_on_track(self.place_bin_centers_,
self.is_track_interior_)
def fit_state_transition(
self, position, is_training=None, replay_speed=None,
transition_type='random_walk'):
logger.info('Fitting state transition...')
if is_training is None:
            is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
if replay_speed is not None:
self.replay_speed = replay_speed
self.transition_type = transition_type
try:
self.state_transition_ = CONTINUOUS_TRANSITIONS[transition_type](
self.place_bin_centers_, self.is_track_interior_,
position, self.edges_, is_training, self.replay_speed,
self.position_range, self.movement_var,
np.asarray(self.place_bin_centers_nodes_df_.node_id),
self.distance_between_nodes_)
except AttributeError:
self.state_transition_ = CONTINUOUS_TRANSITIONS[transition_type](
self.place_bin_centers_, self.is_track_interior_,
position, self.edges_, is_training, self.replay_speed,
self.position_range, self.movement_var,
None, None)
def fit(self):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def save_model(self, filename='model.pkl'):
joblib.dump(self, filename)
@staticmethod
def load_model(filename='model.pkl'):
return joblib.load(filename)
def copy(self):
return deepcopy(self)
def convert_results_to_xarray(self, results, time):
n_position_dims = self.place_bin_centers_.shape[1]
n_time = time.shape[0]
if n_position_dims > 1:
dims = ['time', 'x_position', 'y_position']
coords = dict(
time=time,
x_position=get_centers(self.edges_[0]),
y_position=get_centers(self.edges_[1]),
)
else:
dims = ['time', 'position']
coords = dict(
time=time,
position=get_centers(self.edges_[0]),
)
new_shape = (n_time, *self.centers_shape_)
try:
results = xr.Dataset(
{key: (dims, mask(value,
self.is_track_interior_.ravel(order='F'))
.reshape(new_shape).swapaxes(-1, -2))
for key, value in results.items()},
coords=coords)
except ValueError:
results = xr.Dataset(
{key: (dims, mask(value,
self.is_track_interior_.ravel(order='F'))
.reshape(new_shape))
for key, value in results.items()},
coords=coords)
return results
class SortedSpikesDecoder(_DecoderBase):
def __init__(self, place_bin_size=2.0, replay_speed=40, movement_var=0.05,
position_range=None, knot_spacing=10,
spike_model_penalty=1E1,
transition_type='random_walk',
initial_conditions_type='uniform_on_track',
infer_track_interior=True):
'''
Attributes
----------
place_bin_size : float, optional
Approximate size of the position bins.
replay_speed : int, optional
How many times faster the replay movement is than normal movement.
movement_var : float, optional
            How far the animal can move in one time bin during normal
movement.
position_range : sequence, optional
A sequence of `n_position_dims`, each an optional (lower, upper)
tuple giving the outer bin edges for position.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of
`n_position_dims` None values.
knot_spacing : float, optional
spike_model_penalty : float, optional
transition_type : ('empirical_movement' | 'random_walk' |
'uniform', 'identity')
initial_conditions_type : ('uniform' | 'uniform_on_track')
infer_track_interior : bool, optional
'''
super().__init__(place_bin_size, replay_speed, movement_var,
position_range, transition_type,
initial_conditions_type, infer_track_interior)
self.knot_spacing = knot_spacing
self.spike_model_penalty = spike_model_penalty
def fit_place_fields(self, position, spikes, is_training=None):
logger.info('Fitting place fields...')
if is_training is None:
            is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
self.place_fields_ = estimate_place_fields(
position[is_training],
spikes[is_training],
self.place_bin_centers_,
self.place_bin_edges_,
penalty=self.spike_model_penalty,
knot_spacing=self.knot_spacing)
def plot_place_fields(self, sampling_frequency=1, col_wrap=5):
'''Plots the fitted 2D place fields for each neuron.
Parameters
----------
sampling_frequency : float, optional
col_wrap : int, optional
Returns
-------
g : xr.plot.FacetGrid instance
'''
try:
g = (self.place_fields_.unstack('position') * sampling_frequency
).plot(x='x_position', y='y_position', col='neuron',
hue='encoding_group', col_wrap=col_wrap)
except ValueError:
g = (self.place_fields_ * sampling_frequency).plot(
x='position', col='neuron', hue='encoding_group',
col_wrap=col_wrap)
return g
def fit(self, position, spikes, is_training=None, is_track_interior=None,
track_graph=None, edge_order=None,
edge_spacing=15):
'''
Parameters
----------
position : ndarray, shape (n_time, n_position_dims)
spikes : ndarray, shape (n_time, n_neurons)
is_training : None or bool ndarray, shape (n_time), optional
Time bins to be used for encoding.
        is_track_interior : None or bool ndarray, shape (n_x_bins, n_y_bins)
track_graph : networkx.Graph
edge_order : array_like
edge_spacing : None, float or array_like
Returns
-------
self
'''
position = atleast_2d(np.asarray(position))
spikes = np.asarray(spikes)
self.fit_place_grid(position, track_graph,
edge_order, edge_spacing,
self.infer_track_interior, is_track_interior)
self.fit_initial_conditions(position)
self.fit_state_transition(
position, is_training, transition_type=self.transition_type)
self.fit_place_fields(position, spikes, is_training)
return self
def predict(self, spikes, time=None, is_compute_acausal=True):
'''
Parameters
----------
spikes : ndarray, shape (n_time, n_neurons)
time : ndarray or None, shape (n_time,), optional
is_compute_acausal : bool, optional
Returns
-------
results : xarray.Dataset
'''
spikes = np.asarray(spikes)
results = {}
results['likelihood'] = scaled_likelihood(
estimate_spiking_likelihood(
spikes, np.asarray(self.place_fields_)))
results['causal_posterior'] = _causal_decode(
self.initial_conditions_, self.state_transition_,
results['likelihood'])
if is_compute_acausal:
results['acausal_posterior'] = (
_acausal_decode(results['causal_posterior'][..., np.newaxis],
self.state_transition_))
n_time = spikes.shape[0]
if time is None:
time = np.arange(n_time)
return self.convert_results_to_xarray(results, time)
class ClusterlessDecoder(_DecoderBase):
'''
Attributes
----------
place_bin_size : float, optional
Approximate size of the position bins.
replay_speed : int, optional
How many times faster the replay movement is than normal movement.
movement_var : float, optional
        How far the animal can move in one time bin during normal
movement.
position_range : sequence, optional
A sequence of `n_position_dims`, each an optional (lower, upper)
tuple giving the outer bin edges for position.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of
`n_position_dims` None values.
model : scikit-learn density estimator, optional
model_kwargs : dict, optional
occupancy_model : scikit-learn density estimator, optional
occupancy_kwargs : dict, optional
transition_type : ('empirical_movement' | 'random_walk' |
'uniform', 'identity')
initial_conditions_type : ('uniform' | 'uniform_on_track')
'''
def __init__(self, place_bin_size=2.0, replay_speed=40, movement_var=0.05,
position_range=None, model=NumbaKDE,
model_kwargs=_DEFAULT_CLUSTERLESS_MODEL_KWARGS,
occupancy_model=None, occupancy_kwargs=None,
transition_type='random_walk',
initial_conditions_type='uniform_on_track',
infer_track_interior=True):
super().__init__(place_bin_size, replay_speed, movement_var,
position_range, transition_type,
initial_conditions_type, infer_track_interior)
self.model = model
self.model_kwargs = model_kwargs
if occupancy_model is None:
self.occupancy_model = model
self.occupancy_kwargs = model_kwargs
else:
self.occupancy_model = occupancy_model
self.occupancy_kwargs = occupancy_kwargs
def fit_multiunits(self, position, multiunits, is_training=None):
'''
Parameters
----------
position : array_like, shape (n_time, n_position_dims)
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
is_training : None or array_like, shape (n_time,)
'''
logger.info('Fitting multiunits...')
if is_training is None:
            is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
(self.joint_pdf_models_, self.ground_process_intensities_,
self.occupancy_, self.mean_rates_) = fit_multiunit_likelihood(
position[is_training], multiunits[is_training],
self.place_bin_centers_, self.model, self.model_kwargs,
self.occupancy_model, self.occupancy_kwargs,
self.is_track_interior_.ravel(order='F'))
def fit(self, position, multiunits, is_training=None,
is_track_interior=None, track_graph=None,
edge_order=None, edge_spacing=15):
'''
Parameters
----------
position : array_like, shape (n_time, n_position_dims)
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
is_training : None or array_like, shape (n_time,)
is_track_interior : None or ndarray, shape (n_x_bins, n_y_bins)
track_graph : networkx.Graph
edge_order : array_like
edge_spacing : None, float or array_like
Returns
-------
self
'''
position = atleast_2d(np.asarray(position))
multiunits = np.asarray(multiunits)
self.fit_place_grid(position, track_graph,
edge_order, edge_spacing,
self.infer_track_interior, is_track_interior)
self.fit_initial_conditions(position)
self.fit_state_transition(
position, is_training, transition_type=self.transition_type)
self.fit_multiunits(position, multiunits, is_training)
return self
def predict(self, multiunits, time=None, is_compute_acausal=True):
'''
Parameters
----------
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
time : None or ndarray, shape (n_time,)
is_compute_acausal : bool, optional
Use future information to compute the posterior.
Returns
-------
results : xarray.Dataset
'''
multiunits = np.asarray(multiunits)
results = {}
results['likelihood'] = scaled_likelihood(
estimate_multiunit_likelihood(
multiunits, self.place_bin_centers_,
self.joint_pdf_models_, self.ground_process_intensities_,
self.occupancy_, self.mean_rates_,
self.is_track_interior_.ravel(order='F')))
results['causal_posterior'] = _causal_decode(
self.initial_conditions_, self.state_transition_,
results['likelihood'])
if is_compute_acausal:
results['acausal_posterior'] = (
_acausal_decode(results['causal_posterior'][..., np.newaxis],
self.state_transition_))
n_time = multiunits.shape[0]
if time is None:
time = np.arange(n_time)
return self.convert_results_to_xarray(results, time)
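# Hedged usage sketch (not part of the original module). The synthetic data and
# parameter values below are illustrative assumptions; only the
# SortedSpikesDecoder constructor, fit(), and predict() signatures defined
# above are taken from this file.
if __name__ == '__main__':
    n_time, n_neurons = 1000, 5
    position = np.linspace(0, 100, n_time)  # 1D positions along a track
    spikes = np.random.poisson(0.05, size=(n_time, n_neurons))  # fake spike counts
    decoder = SortedSpikesDecoder(place_bin_size=2.0)
    decoder.fit(position, spikes)  # estimates place fields and the state transition
    results = decoder.predict(spikes, time=np.arange(n_time))
    print(results)  # xarray.Dataset with likelihood and posterior variables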
|
StarcoderdataPython
|
5056846
|
#!/usr/bin/env python3
import sys
import time
import argparse
import subprocess
import logging as log
from pathlib import Path
import pandas as pd
from Bio import SeqIO
from flanker import cluster, salami
start = time.time()
__author__ = "<NAME>, <NAME>"
# arguments for the script
def get_arguments():
parser = argparse.ArgumentParser(description = 'flanker',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('required arguments')
# input fasta file
required.add_argument('-i', '--fasta_file', action = 'store',
required = True,
help = 'Input fasta file')
# gene(s) to annotate
genes=parser.add_mutually_exclusive_group(required=True)
genes.add_argument('-g', '--gene', nargs='+', action = 'store',
                       help = 'Gene(s) of interest (escape any special characters). Use space separation for multiple genes')
genes.add_argument('-log','--list_of_genes',action='store',default=False,
help = 'Line separated file containing genes of interest')
# flanks desired
parser.add_argument('-f','--flank', action='store',
help='Choose which side(s) of the gene to extract (upstream/downstream/both)',
default='both')
# running mode
parser.add_argument('-m', '--mode',action='store',
help = 'One of "default" - normal mode, "mm" - multi-allelic cluster, or "sm" - salami-mode',
default = "default")
# is sequence circularised?
parser.add_argument('-circ', '--circ', action = 'store_true',
help = 'Is sequence circularised')
# include gene in output sequence?
parser.add_argument('-inc', '--include_gene', action = 'store_true',
help = 'Include the gene of interest')
# specify abricate database
parser.add_argument('-db', '--database', action = 'store',
help = 'Choose Abricate database e.g. NCBI, resfinder',
default = 'ncbi')
# output verbosity
parser.add_argument("-v", "--verbose", const=1, default=0, type=int, nargs="?",
help="Increase verbosity: 0 = only warnings, 1 = info, 2 = debug. No number means info. Default is no verbosity.")
# window arguments
window=parser.add_argument_group('window options')
window.add_argument('-w', '--window', action = 'store', type=int,
help = 'Length of flanking sequence/first window length',
default = 1000)
window.add_argument('-wstop', '--window_stop', action='store',type=int,
help = 'Final window length',
default = None)
window.add_argument('-wstep', '--window_step', action='store',type=int,
help = 'Step in window sequence',
default = None)
# clustering options
cluster=parser.add_argument_group('clustering options')
cluster.add_argument('-cl','--cluster',help='Turn on clustering mode?',action='store_true'),
cluster.add_argument('-o', '--outfile',action='store',help='Prefix for the clustering file',default='out'),
cluster.add_argument('-tr', '--threshold',action='store',help='mash distance threshold for clustering',default="0.001"),
cluster.add_argument('-p', '--threads',action='store',help='threads for mash to use',default='1'),
cluster.add_argument('-k', '--kmer_length',action='store', help='kmer length for Mash',default='21'),
cluster.add_argument('-s', '--sketch_size',action='store', help='sketch size for mash',default='1000')
args = parser.parse_args(None if sys.argv[1:] else ['-h'])
return args
# validate input
def check_input(fasta_file):
fasta_records = list(SeqIO.parse(fasta_file, 'fasta'))
    assert len(fasta_records) >= 1, 'No records found in fasta file'
    return True  # return a truthy value so main() can log that validation passed
# annotate for gene(s) of interest
def run_abricate(file):
args=get_arguments()
abricate_command = ["abricate", "--db", args.database, file] # shell commands
p = subprocess.Popen(abricate_command, stdout = subprocess.PIPE, stderr = subprocess.PIPE) # run abricate
out, _ = p.communicate() # read stdout data
out = out.decode() # decode from unicode
o = open(str(file + '_resfinder'),'w') # create output file
o.write(out) # write output file
o.close() # close output
# returns the start and end positions of the annotation
def flank_positions(data, gene_):
gene = data[data["GENE"].str.match(gene_)]
# check if gene is found
if len(gene) == 0:
return True
g = gene['GENE'].iloc[0]
# LHS flank
start = int(gene['START'].iloc[0]) # start of gene
start -= 1 # end of LHS flank
# RHS flank
end = int(gene['END'].iloc[0]) # end of gene/start of RHS flank
return(start, end, g)
# writes output fasta
def writer(record, gene, window, isolate, x,gene_sense):
record.description = f"{record.description} | {gene} | {window}bp window"
with open(f"{isolate}_{gene}_{window}_{x}_flank.fasta", "w") as f:
if gene_sense == '+':
SeqIO.write(record, f, "fasta")
log.info(f"{f.name} sucessfully created!")
f.close()
elif gene_sense == '-':
record.seq=record.seq.reverse_complement()
SeqIO.write(record, f, "fasta")
log.info(f"{f.name} sucessfully created!")
f.close()
# for processing multi-fasta files
def filter_abricate(data, isolate):
data = data.loc[data['SEQUENCE'] == isolate]
return(data)
# generates flanks for circularised sequences
def flank_fasta_file_circ(file, window,gene):
args = get_arguments()
unfiltered_abricate_file = file + '_resfinder' # name of abricate output for fasta
data = pd.read_csv(unfiltered_abricate_file, sep='\t', header = 0)
guids=data['SEQUENCE'].unique()
log.debug(guids)
for guid in guids:
abricate_file=filter_abricate(data,guid)
pos = flank_positions(abricate_file, gene)
if (pos == True):
log.warning(f"Error: Gene {gene} not found in {guid}")
else:
pos=list(pos)
gene_sense=abricate_file.loc[abricate_file['GENE']==gene].filter(items=['STRAND'])
log.info(f"Gene {gene} found in {guid}")
gene_sense=str(gene_sense['STRAND'].iloc[0])
log.debug(gene_sense)
log.debug(pos)
# initialise dictionaries of sequence splicing functions
d = {(True, 'both'): lambda record, pos, w, l : record.seq[(pos[0]-w):(pos[1]+w)],
(True, 'upstream'): lambda record, pos, w, l : record.seq[(pos[0]-w):(pos[1])],
(True, 'downstream'): lambda record, pos, w, l : record.seq[pos[0]:(pos[1]+w)],
(False, 'both'): lambda record, pos, w, l : record.seq[(pos[0]-w):pos[0]] + record.seq[pos[1]:(pos[1]+w)],
(False, 'upstream'): lambda record, pos, w, l : record.seq[(pos[0]-w):pos[0]],
(False, 'downstream'): lambda record, pos, w, l : record.seq[pos[1]:(pos[1]+w)]}
d_before = {(True, 'both'): lambda record, pos, w, l : record.seq[(l-(w-pos[0])):l] + record.seq[0:(pos[1]+w)],
(True, 'upstream'): lambda record, pos, w, l : record.seq[(l-(w-pos[0])):l] + record.seq[0:(pos[1])] ,
(True, 'downstream'): lambda record, pos, w, l : record.seq[pos[0]:(pos[1]+w)],
(False, 'both'): lambda record, pos, w, l : record.seq[(l-(w-pos[0])):l] + record.seq[0:pos[0]] + record.seq[pos[1]:(pos[1]+w)] ,
(False, 'upstream'): lambda record, pos, w, l : record.seq[(l-(w-pos[0])):l] + record.seq[0:pos[0]],
(False, 'downstream'): lambda record, pos, w, l : record.seq[pos[1]:(pos[1]+w)]}
d_after = {(True, 'both'): lambda record, pos, w, l : record.seq[(pos[0]-w):l] + record.seq[0:(pos[1]+w-l)],
(True, 'upstream'): lambda record, pos, w, l : record.seq[(pos[0]-w):pos[1]],
(True, 'downstream'): lambda record, pos, w, l : record.seq[(pos[0]):l] + record.seq[0:(pos[1]+w-l)],
(False, 'both'): lambda record, pos, w, l : record.seq[(pos[0]-w):pos[0]] + record.seq[pos[1]:l] + record.seq[0:((pos[1]+w)-l)],
(False, 'upstream'): lambda record, pos, w, l : record.seq[(pos[0]-w):pos[0]],
(False, 'downstream'): lambda record, pos, w, l : record.seq[pos[1]:l] + record.seq[0:((pos[1]+w)-l)]}
# loop through records in fasta
for record in SeqIO.parse(file, "fasta"):
#select the fasta record of interest
w = int(window)
l = len(record.seq)
x = args.flank
if record.description == guid:
if gene_sense == '-':
#record.seq = record.seq.reverse_complement()
if args.flank == 'upstream':
x = 'downstream'
else:
x = 'upstream'
name=record.description
log.info(pos[2] + ' found!')
# if window is too long for sequence length
if w > 0.5 * (pos[0] - pos[1] + l):
log.info(f"Error: Window length {w} too long for sequence length {l}")
continue
# if window exceeds sequence length after gene
if (pos[1] + w > l):
log.info("Window exceeds seq length after gene")
record.seq = d_after[(args.include_gene, x)](record, pos, w, l)
writer(record, pos[2], w, guid, args.flank, gene_sense)
continue
# if window exceeds sequence length before gene
if (pos[0] - w < 0):
log.info("Window exceeds seq length before gene")
record.seq = d_before[(args.include_gene, x)](record, pos, w, l)
writer(record, pos[2], w, guid, args.flank, gene_sense)
continue
else:
log.debug("Window is good")
record.seq = d[(args.include_gene, x)](record, pos, w, l)
writer(record, pos[2], w, guid, args.flank, gene_sense)
continue
# generates flanks for linear sequences
def flank_fasta_file_lin(file, window,gene):
args = get_arguments()
unfiltered_abricate_file = file + '_resfinder' # name of abricate output for fasta
data = pd.read_csv(unfiltered_abricate_file, sep='\t', header = 0)
guids=data['SEQUENCE'].unique()
for guid in guids:
abricate_file=filter_abricate(data,guid)
pos = flank_positions(abricate_file, gene)
if pos == True:
log.error(f"Error: Gene {gene} not found in {guid}")
else:
gene_sense=abricate_file.loc[abricate_file['GENE']==gene].filter(items=['STRAND'])
gene_sense=str(gene_sense['STRAND'].iloc[0])
# initialise dictionary of sequence splicing functions
d_lin = {(True, 'both'): lambda record, pos, w, l: record.seq[max(0,pos[0]-w):min(l, pos[1]+w)],
(True, 'upstream'): lambda record, pos, w, l : record.seq[max(0,pos[0]-w):min(l,pos[1])],
(True, 'downstream'): lambda record, pos, w, l : record.seq[pos[0]:min(l, pos[1]+w)],
(False, 'both'): lambda record, pos, w, l : record.seq[max(0, pos[0]-w):pos[0]] + record.seq[pos[1]:min(l, pos[1]+w)],
(False, 'upstream'): lambda record, pos, w, l : record.seq[max(0, pos[0]-w):pos[0]],
(False, 'downstream'): lambda record, pos, w, l : record.seq[pos[1]:min(l, pos[1]+w)]}
w = int(window)
x = args.flank
# loop through records in fasta
for record in SeqIO.parse(file, "fasta"):
if record.description == guid:
if gene_sense == '-':
if args.flank == 'upstream':
x = 'downstream'
else:
x = 'upstream'
name=record.description
log.info(f"{gene} found in {record.description}")
l = len(record.seq)
record.seq = d_lin[(args.include_gene, x)](record, pos, w, l)
writer(record, pos[2], w, guid, args.flank, gene_sense)
continue
def flanker_main():
args = get_arguments()
run_abricate(args.fasta_file)
if args.list_of_genes == False:
gene_list=args.gene
else:
gene_list=[]
with open(args.list_of_genes, 'rb') as gl:
for line in gl:
line=line.decode('utf-8')
gene_list.append(line.strip())
log.debug(gene_list)
if args.window_stop is not None:
for i in range(args.window, args.window_stop, args.window_step):
for gene in gene_list:
if args.circ == True:
flank_fasta_file_circ(args.fasta_file, i, gene.strip())
else:
flank_fasta_file_lin(args.fasta_file, i, gene.strip())
if args.cluster ==True and args.mode =='default':
cluster.define_clusters(gene,i,args.threads,args.threshold,args.outfile,args.kmer_length,args.sketch_size)
cluster.flank_scrub()
if args.cluster==True and args.mode=='mm':
cluster.define_clusters(gene,i,args.threads,args.threshold,args.outfile,args.kmer_length,args.sketch_size)
log.info("Cleaning up")
cluster.flank_scrub()
else:
for gene in gene_list:
if args.circ == True:
flank_fasta_file_circ(args.fasta_file, args.window, gene.strip())
else:
flank_fasta_file_lin(args.fasta_file, args.window,gene.strip())
if args.cluster ==True and args.mode =='default':
log.info("Performing clustering")
cluster.define_clusters(gene,args.window,args.threads,args.threshold,args.outfile,args.kmer_length,args.sketch_size)
log.info("Cleaning up")
cluster.flank_scrub()
if args.cluster==True and args.mode=='mm':
log.info("Performing clustering")
cluster.define_clusters(gene,"mm",args.threads,args.threshold,args.outfile,args.kmer_length,args.sketch_size)
log.info("Cleaning up")
cluster.flank_scrub()
def main():
args=get_arguments()
logger = log.getLogger()
log.basicConfig(format="%(message)s")
if args.verbose == 0:
logger.setLevel(log.WARNING)
elif args.verbose == 1:
logger.setLevel(log.INFO)
elif args.verbose == 2:
logger.setLevel(log.DEBUG)
log.info(args)
if check_input(args.fasta_file):
log.info(f"{args.fasta_file} is valid and not empty")
if args.mode =="default" or args.mode == "mm":
flanker_main()
elif args.mode =="sm":
if args.list_of_genes != False:
salami.salami_main(args.list_of_genes,args.fasta_file,args.include_gene,args.window_step,args.window_stop,args.outfile,args.flank,args.threads,args.threshold,args.cluster,args.kmer_length,args.sketch_size)
if args.list_of_genes == False:
salami.salami_main(args.gene,args.fasta_file,args.include_gene,args.window_step,args.window_stop,args.outfile,args.flank,args.threads,args.threshold,args.cluster,args.kmer_length,args.sketch_size)
end = time.time()
log.info(f"All done in {round(end - start, 2)} seconds")
if __name__ == '__main__':
main()
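# Hedged usage example (illustrative assumption, not part of the original script).
# The file and gene names below are placeholders; the flags are those defined in
# get_arguments() above:
#   python flanker.py -i assemblies.fasta -g blaKPC-2 -w 5000 -f both -circ -v 1
# This would extract 5 kb flanks on both sides of blaKPC-2 from each record in
# assemblies.fasta, treating each sequence as circular.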
|
StarcoderdataPython
|
6463476
|
import wx
import wx.gizmos
import wx.lib
from wx.lib.scrolledpanel import *
import sys
import wxogre
from FlatNotebook import *
import ogre.renderer.OGRE as ogre
#from imagebrowser import *
import Image
import time
#import FreeImagePy as FIPY
from ogreyEntity import *
from ogreyLevel import *
from ogreyEntityTree import *
from ogreyAttributesPanel import *
from ogreyLevelAttributesPanel import *
from ogreyExportEntity import *
from ogreyImportEntity import *
from ogreyOptionsPanel import *
from ogreyScriptEditor import *
from ogreyLevelTree import *
from ogreyConfig import *
from ogreyMaterialTool import *
from ogreyOgreManagers import *
from ogreySingleton import *
class OgreManager:
__metaclass__=Singleton
def __init__(self):
self.ogreMgr = wxOgreConfig().ogreMgr
self.ogreMgr.initWindow()
def getOgreManager(self):
return self.ogreMgr
def reloadResourses(self):
if not self.ogreMgr.shouldInitResources:
ogre.GpuProgramManager.getSingleton().removeAll()
#ogre.CompositorManager.getSingleton().removeAll()
ogre.TextureManager.getSingleton().removeAll()
dict = []
scripts = {}
for value in self.resourceInformation.materialsOrigin:
ogre.MaterialManager.getSingleton().unload(value["name"])
ogre.MaterialManager.getSingleton().remove(value["name"])
tempDic = {}
for value in self.resourceInformation.materialsOrigin:
tempDic[value["origin"]] = value
for (key, value) in tempDic.iteritems():
stream = ogre.ResourceGroupManager.getSingleton().openResource(key)
if not ogre.MaterialManager.getSingleton().resourceExists(value["name"]) == True:
ogre.MaterialManager.getSingleton().parseScript(stream.getAsString(), value["group"])
for value in self.resourceInformation.materialsOrigin:
mat = ogre.MaterialPointer(ogre.MaterialManager.getSingleton().getByName(value["name"]))
mat.compile(True)
ogre.MeshManager.getSingleton().reloadAll()
class ProjectManager(FlatNotebook): #
#__metaclass__=Singleton
def __init__(self, parent):
FlatNotebook.__init__(self, parent, style = wx.NB_TOP)
self.parent = parent
self.LevelProjects = []
self.EntityProjects = []
#self.ogreMgr = wxOgreConfig().ogreMgr
#self.ogreMgr.initWindow()
self.ogreMgr = OgreManager().ogreMgr
self.configManager = ConfigManager()
self.configManager.Load()
self.shallInitResourceInforamtion = True
def getOgreManager(self):
return self.ogreMgr
def getResourceInformation(self):
return self.resourceInformation
def createOgreyEntityManager(self):
if self.shallInitResourceInforamtion: self.initResourceInformation()
ogm = OgreyEntityManager(self, self.ogreMgr, self.statusbar, self.resourceInformation, self.configManager)
self.EntityProjects.append(ogm)
self.AddPage(ogm, "Entity Manager " + str(len(self.EntityProjects)))
self.resourceInformation.reload()
def createOgreyLevelProject(self):
if self.shallInitResourceInforamtion: self.initResourceInformation()
ogm = OgreyLevelProject(self, self.ogreMgr, self.statusbar, self.resourceInformation, self.configManager)
self.LevelProjects.append(ogm)
self.AddPage(ogm, "Level Project " + str(len(self.LevelProjects)))
self.resourceInformation.reload()
def initResourceInformation(self):
self.resourceInformation = ResourceInformation()
self.shallInitResourceInforamtion = False
def createLevelProject(self):
pass
def setStatusBar(self, statusbar):
self.statusbar = statusbar
def reloadResourses(self):
if self.shallInitResourceInforamtion: self.initResourceInformation()
if not self.ogreMgr.shouldInitResources:
ogre.GpuProgramManager.getSingleton().removeAll()
#ogre.CompositorManager.getSingleton().removeAll()
ogre.TextureManager.getSingleton().removeAll()
dict = []
scripts = {}
for value in self.resourceInformation.materialsOrigin:
ogre.MaterialManager.getSingleton().unload(value["name"])
ogre.MaterialManager.getSingleton().remove(value["name"])
tempDic = {}
for value in self.resourceInformation.materialsOrigin:
tempDic[value["origin"]] = value
for (key, value) in tempDic.iteritems():
stream = ogre.ResourceGroupManager.getSingleton().openResource(key)
if not ogre.MaterialManager.getSingleton().resourceExists(value["name"]) == True:
ogre.MaterialManager.getSingleton().parseScript(stream.getAsString(), value["group"])
for value in self.resourceInformation.materialsOrigin:
mat = ogre.MaterialPointer(ogre.MaterialManager.getSingleton().getByName(value["name"]))
mat.compile(True)
ogre.MeshManager.getSingleton().reloadAll()
self.updateProjects()
def updateProjects(self):
for project in self.EntityProjects:
project.reload()
for project in self.LevelProjects:
project.reload()
class BottomNotebook(FlatNotebook):
def __init__(self, parent):
FlatNotebook.__init__(self, parent, style = wx.NB_BOTTOM)
class LeftNotebook(wx.Notebook):
def __init__(self, parent):
wx.Notebook.__init__(self, parent, style = wx.NB_TOP)
class TopNotebook(FlatNotebook):
def __init__(self, parent):
FlatNotebook.__init__(self, parent, style = wx.NB_TOP)
class MiddleNotebook(FlatNotebook):
def __init__(self, parent):
FlatNotebook.__init__(self, parent, style = wx.NB_TOP)
#self.option = option
self.Bind(EVT_FLATNOTEBOOK_PAGE_CHANGED, self.OnChange)
def OnChange(self, event):
try:
self.window.bindView(self.GetPage(self.GetSelection()))
except:
pass
def bindWindow(self, window):
self.window = window
class RightNotebook(wx.Notebook):#(FlatNotebook):
def __init__(self, parent):
wx.Notebook.__init__(self, parent, style = wx.NB_TOP)
self.Bind(EVT_FLATNOTEBOOK_PAGE_CHANGED, self.OnSelect)
def OnSelect(self, event):
if not self.GetSelection() == -1:
self.GetPage(self.GetPreviousSelection()).select(False)
self.GetPage(self.GetSelection()).select(True)
#self.Update()
class wxOgreConfig:
def __init__(self):
self.ogreMgr = wxogre.OgreManager()
class NameFactory:
__metaclass__=Singleton
def __init__(self):
self.Name = 0
def getName(self):
self.Name += 1
return str(self.Name)
class Menu(wx.Menu):
def __init__(self):
wx.Menu.__init__(self)
def AddMenuItems(self, items, parentMenu = None):
if parentMenu == None:
parentMenu = self
else: self = wx.Menu()
for menuItem in items:
if menuItem["enabled"] == True:
if menuItem["menuItem"] == "Submenu":
subMenu = parentMenu.AddMenuItems(menuItem["items"], self)
self.AppendMenu(id = -1, text=menuItem["name"], submenu = subMenu)
elif not menuItem["menuItem"] == "Seperator":
self.AppendItem(item = menuItem["menuItem"])
if not menuItem["event"] == False:
parentMenu.Bind(wx.EVT_MENU, menuItem["event"], id =menuItem["menuItem"].GetId())
elif menuItem["menuItem"] == "Seperator":
self.AppendSeparator()
return self
def FlushMenu(self):
items = self.GetMenuItems()
for i in items:
self.Remove(i.GetId())
class TextureBrowser(wx.lib.scrolledpanel.ScrolledPanel):
def __init__(self, parent, file):
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, style = wx.HSCROLL | wx.VSCROLL)
#try:
#jpg1 = wx.Image(file, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.SetupScrolling()
self.EnableScrolling(True, True)
self.SetScrollbars(20, 20, 50, 50)
img = Image.open(file)
#ld = FIPY.freeimage()
#img = FIPY.genericLoader(file)
#img = FIPY.Image(file)
#img.load(file)
#del img
#pilimg = FIPY.convertToPil(img)
#jpg1 = jpg1.convert('RGB').tostring()
#jpg1 = wx.EmptyImage(img.getSize().getWidth(), img.getSize().getHeight())
#jpg1.SetData(img.convertToWx())
#jpg1 = img.convertToWx()
jpg1 = wx.EmptyImage(img.size[0], img.size[1])
jpg1.SetData(img.convert('RGB').tostring())
#jpg1 = wx.Image(img.convert('RGB').tostring() , wx.BITMAP_TYPE_ANY).ConvertToBitmap()
#jpg1 = wx.BitmapFromImage(wx.ImageFromStream(stream))
stb = wx.StaticBitmap(self, -1, pos=(5, 5), size = (jpg1.GetWidth(), jpg1.GetHeight()))
#stb.SetBitmap(jpg1.ConvertToBitmap())
stb.SetBitmap(jpg1.ConvertToBitmap())
#except:
# pass
class LogList(wx.TextCtrl):
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, -1, "", style=wx.TE_MULTILINE)
sys.stdout = self
sys.stderr = self
class MenuFile(wx.Menu):
def __init__(self, parent, projectManager, name):
wx.Menu.__init__(self)
self.parent = parent
self.projectManager = projectManager
self.name = name
self.enabled = True
#level = self.Append(-1, "&New Level Project", "Create a new Level")
entity = self.Append(-1, "&New Entity Manager", "Create a new Entity Manager Window Instance")
self.AppendSeparator()
exit = self.Append(-1, "&Exit", "Exit Ogrey")
#wx.EVT_MENU(self.parent, level.GetId(), self.OnLevel)
wx.EVT_MENU(self.parent, entity.GetId(), self.OnEntity)
wx.EVT_MENU(self.parent, exit.GetId(), self.OnExit)
def OnLevel(self, event):
self.projectManager.createOgreyLevelProject()
def OnEntity(self, event):
self.projectManager.createOgreyEntityManager()
def OnExit(self, event):
wx.Exit()
class MenuRender(wx.Menu):
def __init__(self, parent, window, name, ogreScene, tabs):
wx.Menu.__init__(self)
self.parent = parent
self.window = window
self.name = name
self.tabs = tabs
self.ogreScene = ogreScene
#solid = self.Append(-1 ,"Solid", "Solid models",kind=wx.ITEM_RADIO )
#wire = self.Append(-1, "Wireframe", "Wireframe models",kind=wx.ITEM_RADIO)
#points = self.Append(-1, "Points", "Points modelse",kind=wx.ITEM_RADIO)
        getView = self.Append(-1, "Get Ogre View", "Get an Ogre View")
reload = self.Append(-1, "Reload", "Reload materials")
#rendertarget = self.Append(-1, "Rendertarget")
#self.AppendSeparator()
#menuRenderShadows = wx.Menu()
#menuRenderShadows.Append(184, "On", "Shadows On", kind=wx.ITEM_RADIO)
#menuRenderShadows.Append(185, "Off", "Shadows Off", kind=wx.ITEM_RADIO)
#shadows = self.Append(-1, "Shadows...", "Shadows attributes")
#wx.EVT_MENU(self.parent, solid.GetId(), self.renderModeSolid)
#wx.EVT_MENU(self.parent, wire.GetId(), self.renderModeWireframes)
#wx.EVT_MENU(self.parent, points.GetId(), self.renderModePoints)
wx.EVT_MENU(self.parent, getView.GetId(), self.OnGetView)
wx.EVT_MENU(self.parent, reload.GetId(), self.OnReload)
#wx.EVT_MENU(self.parent, shadows.GetId(), self.renderShadows)
#wx.EVT_MENU(self.parent, rendertarget.GetId(), self.renderTarget)
#def renderTarget(self, event):
# self.window.renderWindow.active = False
def OnGetView(self, event):
self.tabs.AddPage(self.ogreScene.getView(), "Entity Viewpoint")
def bind(self, window):
self.window = window
def renderModeSolid(self, event):
self.window.renderModeSolid()
def renderModeWireframes(self, event):
self.window.renderModeWireframe()
def renderModePoints(self, event):
self.window.renderModePoints()
#def renderShadows(self, event):
# shadowAttribDialog = ShadowsDialog(self.parent, self.window)
#returnCode = shadowAttribDialog.ShowModal()
#shadowAttribDialog.Destroy()
class MenuBrowse(wx.Menu):
def __init__(self, parent, window, name):
wx.Menu.__init__(self)
self.parent = parent
self.window = window
self.name = name
openScript = self.Append(-1 ,"Open Script File...", "Open File to Edit")
openTexture = self.Append(-1, "Open Texture...", "Open Texture")
wx.EVT_MENU(self.parent, openScript.GetId(), self.OnOpenScript)
wx.EVT_MENU(self.parent, openTexture.GetId(), self.OnOpenTexture)
def OnOpenScript(self, event):
fdg = wx.FileDialog(self.parent, message = "Choose File", style=wx.OPEN, wildcard = "*.*")
if fdg.ShowModal() == wx.ID_OK:
scrip = ScriptEditor(self.window)
scrip.Load(fdg.GetPath())
self.window.AddPage(scrip, fdg.GetFilename())
fdg.Destroy()
def OnSave(self, event):
pass
def OnOpenTexture(self, event):
fdg = wx.FileDialog(self.parent, message = "Choose File", style=wx.OPEN, wildcard = "*.*")
if fdg.ShowModal() == wx.ID_OK:
#scrip.Load(fdg.GetPath())
self.window.AddPage(TextureBrowser(self.window, fdg.GetPath()), fdg.GetFilename())
fdg.Destroy()
class MenuHelp(wx.Menu):
def __init__(self, parent, window):
wx.Menu.__init__(self)
self.parent = parent
self.name = "&Help"
self.enabled = True
about = self.Append(-1 ,"About", "About Ogrey Tool")
wx.EVT_MENU(self.parent, about.GetId(), self.OnAbout)
def OnAbout(self, event):
adlg = wx.AboutDialogInfo()
adlg.SetName("Ogrey")
adlg.SetVersion("Beta 1")
adlg.SetDevelopers(["<NAME> <EMAIL>"])
adlg.SetCopyright("GPL")
adlg.SetDescription("Ogrey Monster Engine Entity Tool")
adlg.SetWebSite("http://www.opengd.org")
wx.AboutBox(adlg)
class MenuWindow(wx.Menu):
def __init__(self, parent, window):
wx.Menu.__init__(self)
self.parent = parent
self.name = "&Window"
self.enabled = True
class MenuEntityManager(wx.Menu):
def __init__(self, parent, window):
wx.Menu.__init__(self)
self.parent = parent
self.name = "&Entity Manager"
self.enabled = False
class MenuEdit(wx.Menu):
def __init__(self, parent, window):
wx.Menu.__init__(self)
self.parent = parent
self.name = "&Edit"
self.enabled = False
self.window = window
reload = self.Append(-1, "Reload", "Reload materials")
wx.EVT_MENU(self.parent, reload.GetId(), self.OnReload)
def OnReload(self, event):
self.window.reloadResourses()
class OgreyStatusBar(wx.StatusBar):
def __init__(self, parent):
wx.StatusBar.__init__(self,parent)
self.SetFieldsCount(2)
self.SetStatusWidths([-5, -2])
self.SetStatusText("Welcome to Ogrey Entity Tool",0)
class MainOgreyEntityToolFrame(wx.Frame):
def __init__(self, *args, **kw):
wx.Frame.__init__(self, *args, **kw)
config = ConfigManager()
config.Load()
self.projectManager = ProjectManager(self)
#ogreyMaterialTool(self, config.GetConfig(), self.projectManager.getOgreManager())
##self.projectManager.SetBackgroundColour(wx.Color(128,128,128))
mainMenu = [
MenuFile(self,self.projectManager, "&File"),
MenuEdit(self, self.projectManager),
MenuBrowse(self, self.projectManager, "&Browse"),
MenuWindow(self, self.projectManager),
MenuHelp(self, self.projectManager),
## MenuRender(self, self.middleNotebook, "&Ogre", self.ogreScene, self.middleNotebook),
## MenuEditor(self, self.middleNotebook, "&Editor")
]
menuBar = wx.MenuBar()
for menu in mainMenu:
menuBar.Append(menu, menu.name)
#menu.Enable(0, menu.enabled)
self.SetMenuBar(menuBar)
self.statusbar = OgreyStatusBar(self)
self.SetStatusBar(self.statusbar)
self.projectManager.setStatusBar(self.statusbar)
class OgreyEntityTool(wx.App):
def OnInit(self):
self.frame = MainOgreyEntityToolFrame(None, -1, 'Ogrey Monster Engine Entity Tool Beta 1', size=(800,600))
self.frame.Center(wx.CENTER_ON_SCREEN)
self.frame.Show(True)
return True
if __name__ == '__main__':
app = OgreyEntityTool(False)
app.MainLoop()
del app
|
StarcoderdataPython
|
1638038
|
<filename>post_processing/stella_plots.py<gh_stars>0
# -*- coding: utf-8 -*-
## some coding for commonly used plots
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
# setup some plot defaults
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('font', size=30)
rcParams.update({'figure.autolayout': True})
def plot_1d(x,y,xlab,title='',ylab=''):
fig = plt.figure(figsize=(12,8))
plt.plot(x,y)
plt.xlabel(xlab)
if len(ylab) > 0:
plt.ylabel(ylab)
if len(title) > 0:
plt.title(title)
return fig
def logyplot_1d(x,y,xlab,title='',ylab=''):
fig = plt.figure(figsize=(12,8))
plt.semilogy(x,y)
plt.xlabel(xlab)
if len(ylab) > 0:
plt.ylabel(ylab)
if len(title) > 0:
plt.title(title)
return fig
def plot_2d(z,xin,yin,zmin,zmax,xlab='',ylab='',title='',cmp='RdBu'):
fig = plt.figure(figsize=(12,8))
x,y = np.meshgrid(xin,yin)
plt.imshow(z, cmap=cmp, vmin=zmin, vmax=zmax,
extent=[x.min(),x.max(),y.min(),y.max()],
interpolation='nearest', origin='lower', aspect='auto')
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
return fig
def movie_2d(z,xin,yin,zmin,zmax,nframes,outfile,xlab='',ylab='',title='',step=1,cmp='RdBu'):
from matplotlib import animation
fig = plt.figure(figsize=(12,8))
x,y = np.meshgrid(xin,yin)
im = plt.imshow(z[0,:,:], cmap=cmp, vmin=zmin[0], vmax=zmax[0],
extent=[x.min(),x.max(),y.min(),y.max()],
interpolation='nearest', origin='lower', aspect='auto')
plt.colorbar()
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
ims = []
ims.append([im])
for i in range(1,nframes,step):
im = plt.imshow(z[i,:,:], cmap=cmp, vmin=zmin[i], vmax=zmax[i],
extent=[x.min(),x.max(),y.min(),y.max()],
interpolation='nearest', origin='lower', aspect='auto')
ims.append([im])
ani = animation.ArtistAnimation(fig,ims,interval=50,blit=True)
ani.save(outfile)
def movie_1d(x,y,xmin,xmax,ymin,ymax,nframes,outfile,xlab,ylab):
from matplotlib import animation
fig = plt.figure(figsize=(12,8))
ax=plt.axes(xlim=(xmin,xmax),ylim=(ymin,ymax))
line, = ax.plot([],[],lw=2)
plt.xlabel(xlab)
plt.ylabel(ylab)
def init():
line.set_data([],[])
return line,
def animate(i):
line.set_data(x,y[i,:])
return line,
anim=animation.FuncAnimation(fig, animate, init_func=init,
frames=nframes, interval=20)
anim.save(outfile)
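# Hedged usage sketch (not part of the original module): synthetic data only,
# exercising the plot_1d and plot_2d helpers defined above.
if __name__ == '__main__':
    x = np.linspace(0, 2 * np.pi, 200)
    fig1 = plot_1d(x, np.sin(x), xlab=r'$t$', ylab=r'$\sin t$', title='1D example')
    z = np.outer(np.sin(x), np.cos(x))
    fig2 = plot_2d(z, x, x, z.min(), z.max(), xlab=r'$x$', ylab=r'$y$', title='2D example')
    plt.show()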
|
StarcoderdataPython
|
1685271
|
#!/usr/bin/python3
'''Scraper
'''
from abc import ABC, abstractmethod
class Scraper(ABC):
'''An abstract class for all scrapers.
'''
@abstractmethod
def get_name(self) -> str:
'''Retrieves the name of this scraper.
'''
pass
@abstractmethod
def get_manga_info(self, url: str) -> dict:
'''
Retrieves information about a Manga from the given manga URL.
Args:
url (str): The URL of the manga page.
Returns:
dict: Information about a manga.
'''
pass
@abstractmethod
def get_chapter_images(self, url: str) -> list:
'''
Retrieves the chapter image sources of the given chapter URL.
Args:
url (str): The URL of the chapter page.
Returns:
list: A list of URLs of each image source for a chapter.
'''
pass
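# Hedged example (not part of the original module): a minimal concrete scraper
# showing one way the abstract interface above could be implemented. The name
# and returned fields are illustrative assumptions, not a real site scraper.
class DummyScraper(Scraper):
    def get_name(self) -> str:
        return 'dummy'

    def get_manga_info(self, url: str) -> dict:
        # A real implementation would fetch and parse the manga page at `url`.
        return {'title': 'Example Manga', 'url': url, 'chapters': []}

    def get_chapter_images(self, url: str) -> list:
        # A real implementation would return the image URLs found on the chapter page.
        return []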
|
StarcoderdataPython
|
3492726
|
<gh_stars>1-10
import typing as t
import pytest
from corm import Entity, Field, KeyNested, Storage, Relationship, KeyManager
def test_nested_key():
class SomeEntity(Entity):
id: int = Field(pk=True)
name: str
class EntityHolder(Entity):
entity: SomeEntity = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_id',
)
storage = Storage()
entity = SomeEntity({'id': 123, 'name': 'entity'}, storage=storage)
holder = EntityHolder({'entity_id': 123}, storage=storage)
assert holder.entity == entity
assert holder.dict() == {'entity_id': 123}
class ManyEntityHolder(Entity):
entities: t.List[SomeEntity] = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_ids',
many=True,
)
storage = Storage()
entity1 = SomeEntity({'id': 123, 'name': 'entity1'}, storage=storage)
entity2 = SomeEntity({'id': 321, 'name': 'entity2'}, storage=storage)
holder = ManyEntityHolder({'entity_ids': [123, 321]}, storage=storage)
assert holder.entities == [entity1, entity2]
assert holder.dict() == {'entity_ids': [123, 321]}
storage = Storage()
SomeEntity({'id': 123, 'name': 'entity1'}, storage=storage)
SomeEntity({'id': 321, 'name': 'entity2'}, storage=storage)
holder = ManyEntityHolder({'entity_ids': [123, 99999]}, storage=storage)
with pytest.raises(ValueError):
holder.entities
def test_make_back_relationship():
class SomeEntity(Entity):
id: int = Field(pk=True)
name: str
holder: 'EntityHolder' = Relationship('EntityHolder')
class EntityHolder(Entity):
entities: t.List[SomeEntity] = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_ids',
many=True,
back_relation=True,
)
storage = Storage()
entity = SomeEntity({'id': 123, 'name': 'entity'}, storage=storage)
holder = EntityHolder({'entity_ids': [123, 456]}, storage=storage)
delayed_entity = SomeEntity(
{
'id': 456,
'name': 'delayed entity',
},
storage=storage,
)
assert holder.entities == [entity, delayed_entity]
assert entity.holder == holder
assert delayed_entity.holder == holder
    assert holder.dict() == {'entity_ids': [123, 456]}
assert entity.dict() == {'id': 123, 'name': 'entity'}
assert delayed_entity.dict() == {'id': 456, 'name': 'delayed entity'}
def test_complex_key():
class SomeEntity(Entity):
id: int = Field(pk=True)
name: str
class EntityKeyManager(KeyManager):
def get(self, data):
return data['id']
def prepare_to_set(self, entity: SomeEntity) -> t.Any:
return {'id': entity.id}
class ManyEntityHolder(Entity):
entities: t.List[SomeEntity] = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_ids',
many=True,
key_manager=EntityKeyManager(),
)
storage = Storage()
entity1 = SomeEntity({'id': 123, 'name': 'entity1'}, storage=storage)
entity2 = SomeEntity({'id': 321, 'name': 'entity2'}, storage=storage)
holder = ManyEntityHolder(
{
'entity_ids': [
{
'id': 123,
},
{
'id': 321,
},
],
},
storage=storage,
)
assert holder.entities == [entity1, entity2]
assert holder.dict() == {'entity_ids': [{'id': 123}, {'id': 321}]}
def test_change_values():
class SomeEntity(Entity):
id: int = Field(pk=True)
name: str
holder: 'EntityHolder' = Relationship(entity_type='EntityHolder')
many_holder: 'ManyEntityHolder' = Relationship(
entity_type='ManyEntityHolder',
)
class EntityKeyManager(KeyManager):
def get(self, data):
return data['id']
def prepare_to_set(self, entity: SomeEntity) -> t.Any:
return {'id': entity.id}
class EntityHolder(Entity):
entity: SomeEntity = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_id',
key_manager=EntityKeyManager(),
back_relation=True,
)
class ManyEntityHolder(Entity):
entities: t.List[SomeEntity] = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_ids',
many=True,
back_relation=True,
key_manager=EntityKeyManager(),
)
storage = Storage()
entity1 = SomeEntity({'id': 123, 'name': 'entity1'}, storage=storage)
entity2 = SomeEntity({'id': 321, 'name': 'entity2'}, storage=storage)
holder = EntityHolder(
{
'entity_id': {
'id': 123,
},
},
storage=storage,
)
holder.entity = entity2
assert entity1.holder is None
assert entity2.holder is holder
assert holder.entity is entity2
assert holder.dict() == {'entity_id': {'id': 321}}
many_holder = ManyEntityHolder(
{
'entity_ids': [{
'id': 123,
}],
},
storage=storage,
)
many_holder.entities = [entity2]
assert many_holder.entities == [entity2]
assert entity1.many_holder is None
assert entity2.many_holder is many_holder
assert many_holder.dict() == {
'entity_ids': [{
'id': 321,
}],
}
@pytest.mark.skip(reason='Not implemented yet')
def test_change_key_value():
class SomeEntity(Entity):
id: int = Field(pk=True)
name: str
class EntityHolder(Entity):
entity: SomeEntity = KeyNested(
related_entity_field=SomeEntity.id,
origin='entity_id',
)
storage = Storage()
entity = SomeEntity({'id': 123, 'name': 'entity'}, storage=storage)
holder = EntityHolder({'entity_id': 123}, storage=storage)
entity.id = 321
assert holder.entity == entity
assert holder.dict() == {'entity_id': 321}
|
StarcoderdataPython
|
11235493
|
<gh_stars>0
from concurrent import futures
__copyright__ = '''
Copyright 2018 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = '<NAME>'
import grpc
import time
import function_pb2_grpc as function
import function_pb2 as message
import json
import logging
import os
stopped = True
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
def run(func, interaction_model, port):
"""
Runs the gRPC server
:param func: the function to invoke for each gRPC Call() method
:param interaction_model: indicates interaction model: None is single value parameter and return type, 'stream' indicates input and output are generators
:param port: the gRPC port
:return: None
"""
global server, stopped
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
function.add_MessageFunctionServicer_to_server(MessageFunctionServicer(func, interaction_model), server)
server.add_insecure_port('%s:%s' % ('[::]', port))
server.start()
stopped = False
while not stopped:
time.sleep(3)
def stop(grace=None):
"""
Stop the server
:param grace: a grace period, in seconds, to wait
:return: None
"""
global server, stopped
log.info("stopping server")
server.stop(grace)
stopped = True
log.info("server stopped")
"""
The gRPC server implementation. Will invoke a function for each Call() using the interaction model given by the value
of `interaction_model` defined in the function module. `stream` indicates bidirectional streaming. By default, the parameter
and value are primitive types given by the `Content-Type` message header. For `text/plain` the function is expected to accept and
return a str. For `Content-Type=application/json`, the function is expected to accept and return a dict.
"""
class MessageFunctionServicer(function.MessageFunctionServicer):
def __init__(self, func, interaction_model):
self.func = func
self.interaction_model = interaction_model
def Call(self, request_iterator, context):
"""
The gRPC server implementation
:param request_iterator: a generator representing a stream of request messages
:param context: the gRPC request context
:return:
"""
if self.interaction_model == 'stream':
if is_source(self.func):
for item in map(wrap_message, self.func()):
yield item
else:
for item in map(wrap_message, self.func(convert_request_payload(msg) for msg in request_iterator)):
yield item
else:
if is_source(self.func):
result = self.func()
                yield wrap_message(result)
else:
for request in request_iterator:
result = self.func(convert_request_payload(request))
yield wrap_message(result, request.headers)
def wrap_message(payload, headers={}):
"""
Wrap a payload in a Message
:param payload:
:param headers:
:return: the Message
"""
return build_reply_message(payload, headers)
def convert_request_payload(request):
"""
Convert the request payload from bytes for a given request's Content Type header
:param request: the request Message
:return: varies by content type header, e.g., dict or str
"""
if 'application/json' in request.headers['Content-Type'].values:
return json.loads(request.payload)
elif 'application/octet-stream' in request.headers['Content-Type'].values:
return request.payload
elif 'text/plain' in request.headers['Content-Type'].values:
return request.payload.decode('UTF-8')
return request.payload
def build_reply_message(payload, headers):
"""
    Convert the reply payload to bytes given the request's Accept header
    :param payload: the payload
    :param headers: the request header values
    :return: the reply Message
"""
reply = message.Message()
if headers.get('correlationId', None):
reply.headers['correlationId'].values[:] = headers['correlationId'].values[:]
accepts = headers.get('Accepts',message.Message.HeaderValue()).values
if len(accepts) == 0 or 'text/plain' in accepts or "*/*" in accepts:
if type(payload) is dict:
reply.payload = bytes(json.dumps(payload), 'UTF-8')
else:
if type(payload) is str:
reply.payload = bytes(payload, 'UTF-8')
reply.headers['Content-type'].values[:] = ["text/plain"]
else:
reply.payload = payload
reply.headers['Content-type'].values[:] = ["application/octet-stream"]
elif 'application/json' in accepts:
if type(payload) is dict:
reply.payload = bytes(json.dumps(payload), 'UTF-8')
reply.headers['Content-type'].values[:] = ["application/json"]
else:
raise RuntimeError('Cannot convert type %s to JSON' % type(payload))
else:
raise RuntimeError('Unsupported Accept header %s' % accepts)
return reply
def is_source(func):
return func.__code__.co_argcount == 0
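# The sketch below is a hypothetical example of a function module that run() could
# serve; the module name "grpc_server", the handler names, and the port are
# assumptions for illustration, not part of this file. It shows the two interaction
# models described above: single request/reply and bidirectional streaming.
#
#   import grpc_server  # this module
#
#   def echo(payload):
#       # payload arrives as a dict when Content-Type is application/json
#       payload["handled"] = True
#       return payload
#
#   def running_total(numbers):
#       # generator in, generator out -> interaction_model='stream'
#       total = 0
#       for n in numbers:
#           total += n
#           yield str(total)
#
#   if __name__ == "__main__":
#       grpc_server.run(echo, None, 10382)            # single-value model
#       # grpc_server.run(running_total, 'stream', 10382)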
|
StarcoderdataPython
|
5130495
|
# from soccerpy.modules.Fixture.fixture import Fixture
|
StarcoderdataPython
|
6589295
|
<filename>remove.py
# Split the crawl log: links mentioning interforo/blogspot/wordpress are moved to
# interforo.txt, everything else stays in traversed_links.txt.
with open("traversed_links.txt", "r") as traversed_links_file:
    traversed_links = traversed_links_file.readlines()
go = []
count = 0
index = 0
while index < len(traversed_links):
    if traversed_links[index].find("interforo") == -1 and traversed_links[index].find("blogspot") == -1 and traversed_links[index].find("wordpress") == -1:
        index = index + 1
    else:
        count = count + 1
        go.append(traversed_links[index])
        del traversed_links[index]
print(count)
with open("traversed_links.txt", "w") as traversed_links_file:
    for link in traversed_links:
        traversed_links_file.write(link)
with open("interforo.txt", "w") as interforo_file:
    for link in go:
        interforo_file.write(link)
|
StarcoderdataPython
|
5135607
|
<filename>av_utilities/convert.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
convert.py
Script to quickly convert an av file to another type.
Currently will convert from:
.mp3 (stereo)
.mxf (stereo)
.wav (stereo)
Currently will convert to:
.mp3 (stereo)
.wav (PCM signed 16-bit little-endian, stereo)
Author:
– <NAME>, 2016 (<EMAIL>)
© 2016, Child Mind Institute, Apache v2.0 License
Created on Fri Dec 23 12:43:40 2016
@author: jon.clucas
"""
import argparse
import os
import shlex
import subprocess
def collect_extension(dirpath, extension, skips=[
"@Recycle",
"Tests"
]):
"""
function to collect mxf files in a given directory, recursively
Parameters
----------
dirpath: string
path to search
extension: string
filetype
skips: list of strings, optional
directory and file names to skip,
default: ["@Recycle", "Tests"]
Returns
-------
collected: list of strings
list of paths to files with given extension
"""
collected = []
for fp in [
os.path.abspath(
os.path.join(
dirpath,
f
)
) for f in os.listdir(
dirpath
) if f not in skips
]:
if os.path.isdir(fp):
try:
collected = [*collected, *collect_extension(fp, extension, skips)]
except:
print("No {0} files in `{1}`.".format(
extension,
fp
))
elif fp.lower().endswith(extension.lower()):
collected.append(fp)
return(collected)
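# A quick illustration (hypothetical path) of combining collect_extension with
# convert() defined below: gather every .mxf under a media directory, skipping the
# default "@Recycle" and "Tests" folders, and convert each file to .wav in place.
#
#   for mxf in collect_extension("/Volumes/Media", "mxf"):
#       convert(mxf, "mxf", "wav")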
def convert(in_file, ext_from, ext_to, out_path=None):
# make an output filename
out_base = os.path.basename(in_file[:-(1 + len(ext_from))])
out_i = 0
out_path = os.path.dirname(in_file) if not out_path else out_path
out_file = os.path.join(out_path, '.'.join([out_base, ext_to]))
while os.path.exists(out_file):
out_file = os.path.join(out_path, '{0}_{1}.{2}'.format(
out_base,
str(out_i),
ext_to
))
out_i = out_i + 1
# do the conversion verbosely
to_convert = ''.join([
"ffmpeg -i ",
shlex.quote(in_file),
ffmpeg_middle(ext_to),
shlex.quote(out_file)
])
print(''.join(["Converting \"", in_file, "\" to \"", out_file, "\""]))
subprocess.call(to_convert, shell=True)
def ffmpeg_middle(ext_to):
"""
Function to get ffmpeg command middle section to convert to a given
file extension.
Parameter
---------
ext_to: string
filetype to convert to
Returns
-------
ffmiddle: string
ffmpeg command middle section
"""
ffmiddles = {
"mp3": " -codec:a libmp3lame -ac 2 -qscale:a 2 ",
"wav": " -ac 2 -acodec pcm_s16le "
}
return(ffmiddles[ext_to.lower()])
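# For orientation, the pieces above assemble into an ffmpeg call roughly like the
# following (file names are hypothetical); convert() quotes both paths with
# shlex.quote before handing the command to subprocess:
#
#   ffmpeg -i /data/interview.mxf -ac 2 -acodec pcm_s16le /data/interview.wav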
def main():
# script can be run from the command line
parser = argparse.ArgumentParser()
parser.add_argument('in_file', metavar='in_file', type=str)
parser.add_argument('ext_from', metavar='ext_from', type=str)
parser.add_argument('ext_to', metavar='ext_to', type=str)
    parser.add_argument('out_path', metavar='out_path', type=str, nargs='?', default="")
    arg = parser.parse_args()
    if len(arg.out_path):
        convert(arg.in_file, arg.ext_from, arg.ext_to, arg.out_path)
    else:
        convert(arg.in_file, arg.ext_from, arg.ext_to)
# ============================================================================
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
39671
|
<filename>tests/graphical/one_view.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from cgpm.crosscat.state import State
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils import test as tu
# Set up the data generation
cctypes, distargs = cu.parse_distargs(
['normal',
'poisson',
'bernoulli',
'categorical(k=4)',
'lognormal',
'exponential',
'beta',
'geometric',
'vonmises'])
T, Zv, Zc = tu.gen_data_table(
200, [1], [[.25, .25, .5]], cctypes, distargs,
[.95]*len(cctypes), rng=gu.gen_rng(10))
state = State(T.T, cctypes=cctypes, distargs=distargs, rng=gu.gen_rng(312))
state.transition(N=10, progress=1)
def test_crash_simulate_joint(state):
state.simulate(-1, [0, 1, 2, 3, 4, 5, 6, 7, 8], N=10)
def test_crash_logpdf_joint(state):
state.logpdf(-1, {0:1, 1:2, 2:1, 3:3, 4:1, 5:10, 6:.4, 7:2, 8:1.8})
def test_crash_simulate_conditional(state):
state.simulate(-1, [1, 4, 5, 6, 7, 8], {0:1, 2:1, 3:3}, None, 10)
def test_crash_logpdf_conditional(state):
state.logpdf(
-1, {1:2, 4:1, 5:10, 6:.4, 7:2, 8:1.8}, {0:1, 2:1, 3:3})
def test_crash_simulate_joint_observed(state):
state.simulate(1, [0, 1, 2, 3, 4, 5, 6, 7, 8], None, None, 10)
def test_crash_logpdf_joint_observed(state):
with pytest.raises(ValueError):
state.logpdf(1, {0:1, 1:2, 2:1, 3:3, 4:1, 5:10, 6:.4, 7:2, 8:1.8})
def test_crash_simulate_conditional_observed(state):
with pytest.raises(ValueError):
state.simulate(1, [1, 4, 5, 6, 7, 8], {0:1, 2:1, 3:3}, None, 10)
def test_crash_logpdf_conditional_observed(state):
with pytest.raises(ValueError):
state.logpdf(
1, {1:2, 4:1, 5:10, 6:.4, 7:2, 8:1.8}, {0:1, 2:1, 3:3})
# Plot!
state.plot()
# Run some solid checks on a complex state.
test_crash_simulate_joint(state)
test_crash_logpdf_joint(state)
test_crash_simulate_conditional(state)
test_crash_logpdf_conditional(state)
test_crash_simulate_joint_observed(state)
test_crash_logpdf_joint_observed(state)
test_crash_simulate_conditional_observed(state)
test_crash_logpdf_conditional_observed(state)
# Joint equals chain rule for state 1.
joint = state.logpdf(-1, {0:1, 1:2})
chain = state.logpdf(-1, {0:1}, {1:2}) + state.logpdf(-1, {1:2})
assert np.allclose(joint, chain)
if False:
state2 = State(T.T, cctypes=cctypes, distargs=distargs, rng=gu.gen_rng(12))
state2.transition(N=10, progress=1)
# Joint equals chain rule for state 2.
state2.logpdf(-1, {0:1, 1:2})
state2.logpdf(-1, {0:1}, {1:2}) + state2.logpdf(-1, {1:2})
# Take the Monte Carlo average of the conditional.
mc_conditional = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {0:1}, {1:2}),
state2.logpdf(-1, {0:1}, {1:2})
])
# Take the Monte Carlo average of the joint.
mc_joint = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {0:1, 1:2}),
state2.logpdf(-1, {0:1, 1:2})
])
# Take the Monte Carlo average of the marginal.
mc_marginal = np.log(.5) + gu.logsumexp([
state.logpdf(-1, {1:2}),
state2.logpdf(-1, {1:2})
])
|
StarcoderdataPython
|
11341355
|
<filename>stellapy/GUI/graph_tools/OptionsWindow.py<gh_stars>1-10
#################################################################
# OPTIONS WINDOW OPENED FROM THE TOOLBAR
#################################################################
# Load modules
import tkinter as tk
from tkinter import ttk
# Load personal modules
from .ModifyStyling import PAD_TITLE, PAD_LABEL, PAD_ENTRY #@unresolvedimport
#================
# MENU CREATION
#================
class OptionsWindow:
""" Window that opens when the "options" button on the CustomToolbar is pressed.
Parameters
----------
root : Tk()
Root of the tkinter application.
master_class : tab_Convergence, tab_Profiles, tab_Linear, ...
Must have an attribute called Graph
Graph : class_omegavst, class_potentialvsz, ...
The plotting class, must have attributes:
x_name, y_name : str
range, label : dict
axis_id : int
Multiple canvasses can be linked to one tab, this id defines which canvas to manipulate.
poppedout_id : int
Multiple popped out windows can exist, this id defines which canvas to manipulate.
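    Example
    -------
    A minimal sketch (all names hypothetical) of an object satisfying this Graph
    contract; the real stellapy plotting classes provide these attributes themselves:
        class ExampleGraph:
            x_name, y_name = "t", "phi2"
            range = {"x": [0, 100], "y": [0, 1], "x_scale": "linear",
                     "y_scale": "linear", "units": "N.A."}
            label = {"title": "", "x": "t", "y": "|phi|^2"}
            layout = {"fontsize": 20, "handlelength": 1}
            ax = None   # matplotlib Axes, set by the owning tab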
"""
def __init__(self, root, master_class, axis_id=0, poppedout_id=None):
# Attach the root so we can carry it into the functions, get the tab for its dimensions
self.root = root
# Attach the master class: tab_Convergence, tab_Profiles, ...
self.tab = master_class
# Get the width and height of the root window + title bars and of simply the root window
self.height = root.winfo_height() # Height of the application minus the application title
self.width = root.winfo_width() # Width of the application
# Save which canvas this options window is linked to
self.axis_id = axis_id
self.poppedout_id = poppedout_id
        # Prevent indentation of header comment on next lines
if True: return
#==============================
# Open the options window
#==============================
def open_optionsWindow(self):
# Link the plotting class
if self.poppedout_id==None: self.graph = self.tab.Graph[self.axis_id]
if self.poppedout_id!=None: self.graph = self.root.graph_poppedOut[self.poppedout_id]
# Create the preferences window and keep it on top
self.window_options = tk.Toplevel(self.root)
self.window_options.title("Figure options")
self.window_options.attributes('-topmost', 'true')
# Get the height of the screen
winx = 200; winy = 600;
if "twin axis" in list(self.graph.layout.keys()):
if self.graph.layout["twin axis"] == True:
winy = 750
# Center the new window in the screen
x = self.width/2 - winx/2
y = self.height/2 - winy/2
self.window_options.geometry("+%d+%d" % (x, y))
# Create a tabbed view with the possible settings
self.tab_header = ttk.Notebook(self.window_options, style='header.TNotebook')
# Add frames to the tab_header which are the tab windows
self.tab_axis = ttk.Frame(self.tab_header)
self.tab_curves = ttk.Frame(self.tab_header)
# Add the tabs to the tab header
self.tab_header.add(self.tab_axis, text='Axis')
self.tab_header.add(self.tab_curves, text='Curves')
self.tab_header.pack(expand=1, fill='both')
        # Attach the root so the classes can access them
self.tab_axis.root = self.root
self.tab_curves.root = self.root
self.tab_axis.window = self
self.tab_curves.window = self
# Fill the tabs with widgets through classes
self.tabAxes = tabAxis(self.tab_axis, self.tab, self.graph)
self.tabCurves = tabCurves(self.tab_curves, self.tab, self.graph)
        # Prevent indentation of header comment on next lines
if True: return
#==================================
# Show changes on the graph
#==================================
def update_graph(self):
# Draw the canvas and update the GUI
if self.poppedout_id==None: self.tab.Canvas[self.axis_id].draw_idle()
if self.poppedout_id!=None: self.root.canvasPoppedOut[self.poppedout_id].draw_idle()
self.root.update_idletasks()
        # Prevent indentation of header comment on next lines
if True: return
#==================================================================
# Add the tab controlling the axis parameters to the options window
#==================================================================
class tabAxis:
def __init__(self, window, tab, graph):
# Get data from the GUI
self.root = window.root
self.window = window.window
self.tab = tab
self.graph = graph
self.data_range = self.graph.range
self.labels = self.graph.label
# Variables are on the axis
self.x_name = self.graph.x_name
self.y_name = self.graph.y_name
if "twin axis" in list(self.graph.layout.keys()):
if self.graph.layout["twin axis"] == True:
self.ytwin_name = self.graph.ytwin_name
# Add some variables
        self.options_scale = sorted(("Linear", "Logarithmic"))
self.options_units = sorted(("Normalized", "SI units"))
# Create the frame
self.tab_axis = ttk.Frame(window)
self.tab_axis.pack(expand=1, fill=tk.BOTH)
# Configure the frame
tk.Grid.rowconfigure( self.tab_axis, 0, weight=0) # x title
tk.Grid.rowconfigure( self.tab_axis, 1, weight=0) # x min
tk.Grid.rowconfigure( self.tab_axis, 2, weight=0) # x max
tk.Grid.rowconfigure( self.tab_axis, 3, weight=0) # x label
tk.Grid.rowconfigure( self.tab_axis, 4, weight=0) # x scale
tk.Grid.rowconfigure( self.tab_axis, 5, weight=0) # y title
tk.Grid.rowconfigure( self.tab_axis, 6, weight=0) # y min
tk.Grid.rowconfigure( self.tab_axis, 7, weight=0) # y max
tk.Grid.rowconfigure( self.tab_axis, 8, weight=0) # y label
tk.Grid.rowconfigure( self.tab_axis, 9, weight=0) # y scale
        tk.Grid.columnconfigure(self.tab_axis, 0, weight=1, uniform="options")
        tk.Grid.columnconfigure(self.tab_axis, 1, weight=1, uniform="options")
# Add elements to the frame
self.init_title()
self.init_xAxis()
self.init_yAxis()
if "twin axis" in list(self.graph.layout.keys()):
if self.graph.layout["twin axis"] == True:
self.init_yAxis_twin()
def init_title(self):
''' Change the title of the graph. '''
def update_title(event):
self.graph.label["title"] = self.var_title.get()
self.graph.ax.set_title(self.graph.label["title"])
self.window.update_graph()
def update_units(*args):
if self.var_units.get()=="Normalized":
if self.graph.range["units"]!="normalized":
self.graph.range["units"]="normalized"
self.graph.change_units()
self.var_xMin.set(round(self.graph.range["x"][0],2))
self.var_xMax.set(round(self.graph.range["x"][1],2))
self.var_yMin.set(round(self.graph.range["y"][0],2))
self.var_yMax.set(round(self.graph.range["y"][1],2))
self.var_xlabel.set(self.labels["x"])
self.var_ylabel.set(self.labels["y"])
if self.var_units.get()=="SI units":
if self.graph.range["units"]!="SI units":
self.graph.range["units"]="SI units"
self.graph.change_units()
self.var_xMin.set("{:.2e}".format(self.graph.range["x"][0],2))
self.var_xMax.set("{:.2e}".format(self.graph.range["x"][1],2))
self.var_yMin.set("{:.2e}".format(self.graph.range["y"][0],2))
self.var_yMax.set("{:.2e}".format(self.graph.range["y"][1],2))
self.var_xlabel.set(self.labels["x"])
self.var_ylabel.set(self.labels["y"])
# Change the graph title
self.lbl_graph = ttk.Label(self.tab_axis, text="Graph", style='prefTitle.TLabel')
self.var_title = tk.StringVar(value=self.labels["title"])
self.lbl_title = ttk.Label(self.tab_axis, text="Title")
self.ent_title = ttk.Entry(self.tab_axis, textvariable=self.var_title, width=20, style='opt_valueR.TEntry')
self.ent_title.bind('<Return>', update_title)
# Change the units
if self.graph.range["units"]!="N.A.":
self.var_units = tk.StringVar(value=self.options_units[0])
self.lbl_units = ttk.Label(self.tab_axis, text="Units")
self.mnu_units = ttk.OptionMenu(self.tab_axis, self.var_units, self.options_units[0], *self.options_units, style='option.TMenubutton')
self.mnu_units["menu"].config(bg=self.root.color['bbg'], fg=self.root.color['fg'])
if self.graph.range["units"]=="normalized": self.var_units.set(self.options_units[0])
if self.graph.range["units"]=="SI units": self.var_units.set(self.options_units[1])
self.var_units.trace('w', update_units) # link function to a change of the dropdown options
# Add the widgets to the frame
self.lbl_graph.grid( row=0, column=0, columnspan=2, **PAD_TITLE)
self.lbl_title.grid( row=1, column=0, **PAD_LABEL)
self.ent_title.grid( row=1, column=1, **PAD_ENTRY)
if self.graph.range["units"]!="N.A.":
self.lbl_units.grid( row=2, column=0, **PAD_LABEL)
self.mnu_units.grid( row=2, column=1, **PAD_ENTRY)
def init_xAxis(self):
''' Change the x-axis of the graph. '''
def update_xAxis(event):
range_ = [float(self.var_xMin.get()), float(self.var_xMax.get())]
self.graph.range["x"] = range_
self.graph.ax.set_xlim(range_)
self.window.update_graph()
def update_xLabel(event):
self.graph.label["x"] = self.var_xlabel.get()
self.graph.ax.set_xlabel(self.graph.label["x"])
self.window.update_graph()
def update_xscale(*args):
            if self.var_xscale.get()=="Linear": scale='linear'
            if self.var_xscale.get()=="Logarithmic": scale='log'
            self.graph.range["x_scale"] = scale
            self.graph.ax.set_xscale(scale)
            # Logarithmic axis needs a positive start
if float(self.var_xMin.get()) <= 0:
if float(self.var_xMax.get()) > 10: self.var_xMin.set(1)
elif float(self.var_xMax.get()) > 1: self.var_xMin.set(0.1)
elif float(self.var_xMax.get()) > 0.1: self.var_xMin.set(0.01)
range_ = [float(self.var_xMin.get()), float(self.var_xMax.get())]
self.graph.range["x"] = range_
self.graph.ax.set_xlim(range_)
self.window.update_graph()
# Minimum and maximum of the x-axis
self.lbl_xTitle = ttk.Label(self.tab_axis, text=self.x_name, style='prefTitle.TLabel')
self.lbl_xMin = ttk.Label(self.tab_axis, text="Minimum")
self.lbl_xMax = ttk.Label(self.tab_axis, text="Maximum")
self.var_xMin = tk.StringVar(value=round(self.graph.range["x"][0],2))
self.var_xMax = tk.StringVar(value=round(self.graph.range["x"][1],2))
self.ent_xMin = ttk.Entry(self.tab_axis, textvariable=self.var_xMin, width=5, style='opt_valueR.TEntry')
self.ent_xMax = ttk.Entry(self.tab_axis, textvariable=self.var_xMax, width=5, style='opt_valueR.TEntry')
self.ent_xMin.bind('<Return>', update_xAxis)
self.ent_xMax.bind('<Return>', update_xAxis)
# Label for the x-axis
self.var_xlabel = tk.StringVar(value=self.labels["x"])
self.lbl_xlabel = ttk.Label(self.tab_axis, text="Label")
self.ent_xlabel = ttk.Entry(self.tab_axis, textvariable=self.var_xlabel, width=20, style='opt_valueR.TEntry')
self.ent_xlabel.bind('<Return>', update_xLabel)
# Choice between linear and log scales for the x-axis
self.var_xscale = tk.StringVar(value=self.options_scale[0])
self.lbl_xscale = ttk.Label(self.tab_axis, text="Scale")
self.mnu_xscale = ttk.OptionMenu(self.tab_axis, self.var_xscale, self.options_scale[0], *self.options_scale, style='option.TMenubutton')
self.mnu_xscale["menu"].config(bg=self.root.color['bbg'], fg=self.root.color['fg'])
if self.graph.range["x_scale"]=="linear": self.var_xscale.set(self.options_scale[0])
if self.graph.range["x_scale"]=="log": self.var_xscale.set(self.options_scale[1])
self.var_xscale.trace('w', update_xscale) # link function to a change of the dropdown options
# Add the labels to the frame
i=3
self.lbl_xTitle.grid( row=i+0, column=0, columnspan=2, **PAD_TITLE)
self.lbl_xMin.grid( row=i+1, column=0, **PAD_LABEL)
self.ent_xMin.grid( row=i+1, column=1, **PAD_ENTRY)
self.lbl_xMax.grid( row=i+2, column=0, **PAD_LABEL)
self.ent_xMax.grid( row=i+2, column=1, **PAD_ENTRY)
self.lbl_xlabel.grid( row=i+3, column=0, **PAD_LABEL)
self.ent_xlabel.grid( row=i+3, column=1, **PAD_ENTRY)
self.lbl_xscale.grid( row=i+4, column=0, **PAD_LABEL)
self.mnu_xscale.grid( row=i+4, column=1, **PAD_ENTRY)
def init_yAxis(self):
''' Change the y-axis of the graph. '''
def update_yAxis(event):
range_ = [float(self.var_yMin.get()), float(self.var_yMax.get())]
self.graph.range["y"] = range_
self.graph.ax.set_ylim(range_)
self.window.update_graph()
def update_yLabel(event):
self.graph.label["y"] = self.var_ylabel.get()
self.graph.ax.set_ylabel(self.graph.label["y"])
self.window.update_graph()
def update_yscale(*args):
            if self.var_yscale.get()=="Linear": scale='linear'
            if self.var_yscale.get()=="Logarithmic": scale='log'
            self.graph.range["y_scale"] = scale
            self.graph.ax.set_yscale(scale)
            # Logarithmic axis needs a positive start
if float(self.var_yMin.get()) <= 0:
if float(self.var_yMax.get()) > 10: self.var_yMin.set(1)
elif float(self.var_yMax.get()) > 1: self.var_yMin.set(0.1)
elif float(self.var_yMax.get()) > 0.1: self.var_yMin.set(0.01)
range_ = [float(self.var_yMin.get()), float(self.var_yMax.get())]
self.graph.range["y"] = range_
self.graph.ax.set_ylim(range_)
self.window.update_graph()
# Minimum and maximum of the x-axis
self.lbl_yTitle = ttk.Label(self.tab_axis, text=self.y_name, style='prefTitle.TLabel')
self.lbl_space = ttk.Label(self.tab_axis, text=" ", style='prefTitle.TLabel')
self.lbl_yMin = ttk.Label(self.tab_axis, text="Minimum")
self.lbl_yMax = ttk.Label(self.tab_axis, text="Maximum")
self.var_yMin = tk.StringVar(value=round(self.graph.range["y"][0],2))
self.var_yMax = tk.StringVar(value=round(self.graph.range["y"][1],2))
self.ent_yMin = ttk.Entry(self.tab_axis, textvariable=self.var_yMin, width=5, style='opt_valueR.TEntry')
self.ent_yMax = ttk.Entry(self.tab_axis, textvariable=self.var_yMax, width=5, style='opt_valueR.TEntry')
self.ent_yMin.bind('<Return>', update_yAxis)
self.ent_yMax.bind('<Return>', update_yAxis)
# Label for the y-axis
self.var_ylabel = tk.StringVar(value=self.labels["y"])
self.lbl_ylabel = ttk.Label(self.tab_axis, text="Label")
self.ent_ylabel = ttk.Entry(self.tab_axis, textvariable=self.var_ylabel, width=20, style='opt_valueR.TEntry')
self.ent_ylabel.bind('<Return>', update_yLabel)
# Choice between linear and log scales for the x-axis
self.var_yscale = tk.StringVar(value=self.options_scale[0])
self.lbl_yscale = ttk.Label(self.tab_axis, text="Scale")
self.mnu_yscale = ttk.OptionMenu(self.tab_axis, self.var_yscale, self.options_scale[0], *self.options_scale, style='option.TMenubutton')
self.mnu_yscale["menu"].config(bg=self.root.color['bbg'], fg=self.root.color['fg'])
if self.graph.range["y_scale"]=="linear": self.var_yscale.set(self.options_scale[0])
if self.graph.range["y_scale"]=="log": self.var_yscale.set(self.options_scale[1])
self.var_yscale.trace('w', update_yscale) # link function to a change of the dropdown options
# Add the labels to the frame
i=3+5
self.lbl_yTitle.grid( row=i+0, column=0, columnspan=2, **PAD_TITLE)
self.lbl_yMin.grid( row=i+1, column=0, **PAD_LABEL)
self.ent_yMin.grid( row=i+1, column=1, **PAD_ENTRY)
self.lbl_yMax.grid( row=i+2, column=0, **PAD_LABEL)
self.ent_yMax.grid( row=i+2, column=1, **PAD_ENTRY)
self.lbl_ylabel.grid( row=i+3, column=0, **PAD_LABEL)
self.ent_ylabel.grid( row=i+3, column=1, **PAD_ENTRY)
self.lbl_yscale.grid( row=i+4, column=0, **PAD_LABEL)
self.mnu_yscale.grid( row=i+4, column=1, **PAD_ENTRY)
self.lbl_space.grid( row=i+5, column=0, columnspan=2, **PAD_TITLE)
        # Prevent indentation of header comment on next lines
if True: return
def init_yAxis_twin(self):
''' Change the y-axis of the graph. '''
def update_ytwinAxis(event):
range_ = [float(self.var_ytwinMin.get()), float(self.var_ytwinMax.get())]
self.graph.range["ytwin"] = range_
self.graph.ax_twin.set_ylim(range_)
self.window.update_graph()
def update_ytwinLabel(event):
self.graph.label["ytwin"] = self.var_ytwinlabel.get()
self.graph.ax_twin.set_ylabel(self.graph.label["ytwin"])
self.window.update_graph()
def update_ytwinscale(*args):
            if self.var_ytwinscale.get()=="Linear": scale='linear'
            if self.var_ytwinscale.get()=="Logarithmic": scale='log'
            self.graph.range["ytwin_scale"] = scale
            self.graph.ax_twin.set_yscale(scale)
            # Logarithmic axis needs a positive start
if float(self.var_ytwinMin.get()) <= 0:
if float(self.var_ytwinMax.get()) > 10: self.var_ytwinMin.set(1)
elif float(self.var_ytwinMax.get()) > 1: self.var_ytwinMin.set(0.1)
elif float(self.var_ytwinMax.get()) > 0.1: self.var_ytwinMin.set(0.01)
range_ = [float(self.var_ytwinMin.get()), float(self.var_ytwinMax.get())]
self.graph.range["ytwin"] = range_
self.graph.ax_twin.set_ylim(range_)
self.window.update_graph()
# Minimum and maximum of the twinned y-axis
self.lbl_ytwinTitle = ttk.Label(self.tab_axis, text=self.ytwin_name, style='prefTitle.TLabel')
self.lbl_space = ttk.Label(self.tab_axis, text=" ", style='prefTitle.TLabel')
self.lbl_ytwinMin = ttk.Label(self.tab_axis, text="Minimum")
self.lbl_ytwinMax = ttk.Label(self.tab_axis, text="Maximum")
self.var_ytwinMin = tk.StringVar(value=round(self.graph.range["ytwin"][0],2))
self.var_ytwinMax = tk.StringVar(value=round(self.graph.range["ytwin"][1],2))
self.ent_ytwinMin = ttk.Entry(self.tab_axis, textvariable=self.var_ytwinMin, width=5, style='opt_valueR.TEntry')
self.ent_ytwinMax = ttk.Entry(self.tab_axis, textvariable=self.var_ytwinMax, width=5, style='opt_valueR.TEntry')
self.ent_ytwinMin.bind('<Return>', update_ytwinAxis)
self.ent_ytwinMax.bind('<Return>', update_ytwinAxis)
# Label for the ytwin-axis
self.var_ytwinlabel = tk.StringVar(value=self.labels["ytwin"])
self.lbl_ytwinlabel = ttk.Label(self.tab_axis, text="Label")
self.ent_ytwinlabel = ttk.Entry(self.tab_axis, textvariable=self.var_ytwinlabel, width=20, style='opt_valueR.TEntry')
self.ent_ytwinlabel.bind('<Return>', update_ytwinLabel)
# Choice between linear and log scales for the x-axis
self.var_ytwinscale = tk.StringVar(value=self.options_scale[0])
self.lbl_ytwinscale = ttk.Label(self.tab_axis, text="Scale")
self.mnu_ytwinscale = ttk.OptionMenu(self.tab_axis, self.var_ytwinscale, self.options_scale[0], *self.options_scale, style='option.TMenubutton')
self.mnu_ytwinscale["menu"].config(bg=self.root.color['bbg'], fg=self.root.color['fg'])
        if self.graph.range["ytwin_scale"]=="linear": self.var_ytwinscale.set(self.options_scale[0])
        if self.graph.range["ytwin_scale"]=="log": self.var_ytwinscale.set(self.options_scale[1])
self.var_ytwinscale.trace('w', update_ytwinscale) # link function to a change of the dropdown options
# Add the labels to the frame
i=3+5+5
self.lbl_ytwinTitle.grid( row=i+0, column=0, columnspan=2, **PAD_TITLE)
self.lbl_ytwinMin.grid( row=i+1, column=0, **PAD_LABEL)
self.ent_ytwinMin.grid( row=i+1, column=1, **PAD_ENTRY)
self.lbl_ytwinMax.grid( row=i+2, column=0, **PAD_LABEL)
self.ent_ytwinMax.grid( row=i+2, column=1, **PAD_ENTRY)
self.lbl_ytwinlabel.grid( row=i+3, column=0, **PAD_LABEL)
self.ent_ytwinlabel.grid( row=i+3, column=1, **PAD_ENTRY)
self.lbl_ytwinscale.grid( row=i+4, column=0, **PAD_LABEL)
self.mnu_ytwinscale.grid( row=i+4, column=1, **PAD_ENTRY)
self.lbl_space.grid( row=i+5, column=0, columnspan=2, **PAD_TITLE)
        # Prevent indentation of header comment on next lines
if True: return
#==================================================================
# Add the tab controlling the appearance to the options window
#==================================================================
class tabCurves:
def __init__(self, window, tab, graph):
# Get data from the GUI
self.root = window.root
self.tab = tab
self.graph = graph
self.layout = graph.layout
# Create the frame
self.tab_curves = ttk.Frame(window)
self.tab_curves.pack(expand=1, fill=tk.BOTH)
# Configure the frame
        tk.Grid.rowconfigure(    self.tab_curves, 0, weight=0) # Appearance title
tk.Grid.rowconfigure( self.tab_curves, 1, weight=0) # Background
tk.Grid.rowconfigure( self.tab_curves, 2, weight=0) # Font size
tk.Grid.rowconfigure( self.tab_curves, 3, weight=0) # Handle length
        tk.Grid.columnconfigure(self.tab_curves, 0, weight=1, uniform="options")
        tk.Grid.columnconfigure(self.tab_curves, 1, weight=1, uniform="options")
# Add elements to the frame
self.init_appearance()
return
def init_appearance(self):
# Width of the entry widget
width=5
# Change background color, font size and handle length
self.lbl_aTitle = ttk.Label(self.tab_curves, text="Appearance", style='prefTitle.TLabel')
self.var_bg = tk.StringVar(value=self.root.color['bg'])
self.var_fs = tk.StringVar(value=self.layout['fontsize'])
self.var_hl = tk.StringVar(value=self.layout['handlelength'])
self.lbl_bg = ttk.Label(self.tab_curves, text="Background color")
self.lbl_fs = ttk.Label(self.tab_curves, text="Font size")
self.lbl_hl = ttk.Label(self.tab_curves, text="Handle length")
self.ent_bg = ttk.Entry(self.tab_curves, textvariable=self.var_bg, width=width, style='opt_valueR.TEntry')
self.ent_fs = ttk.Entry(self.tab_curves, textvariable=self.var_fs, width=width, style='opt_valueR.TEntry')
self.ent_hl = ttk.Entry(self.tab_curves, textvariable=self.var_hl, width=width, style='opt_valueR.TEntry')
# Add the labels to the frame
self.lbl_aTitle.grid( row=1, column=0, columnspan=2, **PAD_TITLE)
self.lbl_bg.grid( row=2, column=0, **PAD_LABEL)
self.ent_bg.grid( row=2, column=1, **PAD_ENTRY)
self.lbl_fs.grid( row=3, column=0, **PAD_LABEL)
self.ent_fs.grid( row=3, column=1, **PAD_ENTRY)
self.lbl_hl.grid( row=4, column=0, **PAD_LABEL)
self.ent_hl.grid( row=4, column=1, **PAD_ENTRY)
# print(self.mnu_xscale["menu"].keys())
# ['activebackground', 'activeborderwidth', 'activeforeground', 'background', 'bd', 'bg',
# 'borderwidth', 'cursor', 'disabledforeground', 'fg', 'font', 'foreground', 'postcommand',
# 'relief', 'selectcolor', 'takefocus', 'tearoff', 'tearoffcommand', 'title', 'type']
|
StarcoderdataPython
|
9741982
|
<reponame>surf-sci-bc/uspy
"""Data directories."""
from pathlib import Path
from uspy.version import __version__
DATADIR = str(Path.home() / "data") + "/"
LEEMDIR = DATADIR + "LEEM/"
XPSDIR = DATADIR + "XPS/"
STMDIR = DATADIR + "STM/home/stmwizard/Documents/"
|
StarcoderdataPython
|
3282523
|
<filename>TwitOff/twitter_service.py
import os
from dotenv import load_dotenv
import tweepy
load_dotenv()
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
def twitter_api():
auth = tweepy.OAuthHandler(TWITTER_API_KEY,
TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN,
TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
return api
if __name__ == "__main__":
api = twitter_api()
user = api.get_user("tferriss")
print("USER", user)
print(user.screen_name)
print(user.name)
print(user.followers_count)
|
StarcoderdataPython
|
317561
|
from CommandBase import *
import json
from MythicResponseRPC import *
class ITermArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
pass
class ITermCommand(CommandBase):
cmd = "iTerm"
needs_admin = False
help_cmd = "iTerm"
    description = "Read the contents of all open iTerm tabs if iTerm is open, otherwise just inform the operator that it's not currently running"
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_remove_file = False
is_upload_file = False
author = "@its_a_feature_"
attackmapping = ["T1139", "T1056"]
argument_class = ITermArguments
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicResponseRPC(task).register_artifact(
artifact_instance="Target Application of iTerm",
artifact_type="AppleEvent Sent",
)
return task
async def process_response(self, response: AgentResponse):
pass
|
StarcoderdataPython
|
6425506
|
<reponame>getty708/atr-tk<gh_stars>0
""" Initialize sensor nodes parameters.
"""
from tsndctl.device import TSND151
import time
from logging import getLogger
import hydra
from omegaconf import DictConfig, OmegaConf
logger = getLogger(__name__)
@hydra.main(config_path="conf", config_name="config.yaml")
def main(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
logger.info("== Initialize Sensor Node Parameters ==")
# -- Initialize client object --
client = TSND151(cfg.client.name, cfg.client.port, timeout=cfg.timeout)
time.sleep(5)
logger.debug("Success ... Initialize TSND151() object and open connection.")
# -- Update Senor Parameters --
client.init_device()
time.sleep(5)
logger.info("Success ... Parameters are updated!")
# -- End --
client.terminate()
logger.info("Success ... Connection closed.")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
6652662
|
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from skhubness.reduction import LocalScaling
from skhubness.neighbors import NearestNeighbors
LS_METHODS = ['standard', 'nicdm',
]
@pytest.mark.parametrize('method', LS_METHODS)
@pytest.mark.parametrize('verbose', [0, 1])
def test_fit_sorted(method, verbose):
X, y = make_classification()
nn = NearestNeighbors()
nn.fit(X, y)
neigh_dist, neigh_ind = nn.kneighbors()
ls = LocalScaling(method=method, verbose=verbose)
nd_sorted, ni_sorted = ls.fit(neigh_dist, neigh_ind, X, assume_sorted=True)\
.transform(neigh_dist, neigh_ind, X, assume_sorted=True)
nd_unsort, ni_unsort = ls.fit(neigh_dist, neigh_ind, X, assume_sorted=False)\
.transform(neigh_dist, neigh_ind, X, assume_sorted=False)
assert_array_almost_equal(nd_sorted, nd_unsort)
assert_array_equal(ni_sorted, ni_unsort)
@pytest.mark.parametrize('method', ['invalid', None])
def test_invalid_method(method):
X, y = make_classification(n_samples=10, )
nn = NearestNeighbors()
nn.fit(X, y)
neigh_dist, neigh_ind = nn.kneighbors()
ls = LocalScaling(method=method)
ls.fit(neigh_dist, neigh_ind, X, assume_sorted=True)
with assert_raises(ValueError):
_ = ls.transform(neigh_dist, neigh_ind, X, assume_sorted=True)
|
StarcoderdataPython
|
4956602
|
<reponame>peterkulik/ois_api_client<gh_stars>1-10
from typing import Optional
import xml.etree.ElementTree as ET
from ...xml.XmlReader import XmlReader as XR
from ..namespaces import DATA
from ..dto.CustomerInfo import CustomerInfo
from .deserialize_address import deserialize_address
from .deserialize_tax_number import deserialize_tax_number
def deserialize_customer_info(element: ET.Element) -> Optional[CustomerInfo]:
if element is None:
return None
result = CustomerInfo(
customer_tax_number=deserialize_tax_number(
XR.find_child(element, 'customerTaxNumber', DATA)
),
group_member_tax_number=deserialize_tax_number(
XR.find_child(element, 'groupMemberTaxNumber', DATA)
),
community_vat_number=XR.get_child_text(element, 'communityVatNumber', DATA),
third_state_tax_id=XR.get_child_text(element, 'thirdStateTaxId', DATA),
customer_name=XR.get_child_text(element, 'customerName', DATA),
customer_address=deserialize_address(
XR.find_child(element, 'customerAddress', DATA)
),
customer_bank_account_number=XR.get_child_text(element, 'customerBankAccountNumber', DATA),
)
return result
|
StarcoderdataPython
|
6680389
|
<filename>src/models/wisenet_base/models/lcfcn.py
import torch
import torch.nn as nn
import torchvision
import numpy as np
from .. import misc as ms
from .. import ann_utils as au
import torch.nn.functional as F
from . import base_model as bm
from skimage import morphology as morph
class LCFCN_BO(bm.BaseModel):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
self.feature_extracter = bm.FeatureExtracter()
self.blob_head = bm.Upsampler(self.feature_extracter.expansion_rate,
train_set.n_classes)
def forward(self, x_input):
x_8s, x_16s, x_32s = self.feature_extracter.extract_features(x_input)
blob_mask = self.blob_head.upsample(x_input, x_8s, x_16s, x_32s)
return blob_mask
def get_blobs(self, p_labels, return_counts=False):
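        # Convert a batch of per-pixel class-label maps of shape (n, h, w) into
        # per-class connected-component maps of shape (n, n_classes-1, h, w);
        # with return_counts=True the per-image, per-class blob counts are returned too.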
from skimage import morphology as morph
p_labels = ms.t2n(p_labels)
n,h,w = p_labels.shape
blobs = np.zeros((n, self.n_classes-1, h, w))
counts = np.zeros((n, self.n_classes-1))
# Binary case
for i in range(n):
for l in np.unique(p_labels[i]):
if l == 0:
continue
                blobs[i,l-1] = morph.label(p_labels[i]==l)
counts[i, l-1] = (np.unique(blobs[i,l-1]) != 0).sum()
blobs = blobs.astype(int)
if return_counts:
return blobs, counts
return blobs
def predict(self, batch, predict_method="blobs", proposal_type="sharp"):
# self.sanity_checks(batch)
self.eval()
predict_method = "blob_annList"
n,c,h,w = batch["images"].shape
O = self(batch["images"].cuda())
probs = F.softmax(O, dim=1)
blob_dict = au.probs2blobs(probs)
if predict_method == 'pointList':
return {"pointList":blob_dict["pointList"],
"blobs":blob_dict['blobs'],
"probs":blob_dict["probs"]}
###
if predict_method == "blob_annList":
annList = blob_dict["annList"]
for ann in annList:
ann["image_id"] = batch["name"][0]
ann["score"] = 1.0
return {"annList":annList}
if predict_method == 'blobs_probs':
blobs = self.get_blobs(O.max(1)[1])
return blobs, probs
if predict_method == 'original':
return {"blobs":blob_dict['blobs'],
"probs":blob_dict['probs'],
"annList":blob_dict['annList'],
"counts":blob_dict['counts']}
head_pointList = blob_dict["pointList"]
if len(head_pointList) == 0:
return {"blobs": np.zeros((h,w), int),
"annList":[]}
pred_dict = au.pointList2BestObjectness(head_pointList, batch)
return pred_dict
class LCFCN_Pyramid(LCFCN_BO):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
self.feature_extracter = bm.FeatureExtracterPyramid()
from ..models import gam
class LCFCN_Regularized(LCFCN_BO):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
backbone = gam.vgg16(pretrained=True)
self.tmp = list(backbone.features.children());
for i in range(8) :
self.tmp.pop();
# replace relu layers with prelu
# self.replace_relu_with_prelu();
self.feature_extracter = nn.Sequential(*self.tmp);
self.regressor = nn.Linear(in_features=512, out_features=self.n_classes);
def replace_relu_with_prelu(self) :
id_relu = [1,3,6,8,11,13,15,18,20,22];
for i in id_relu :
self.tmp[i] = nn.PReLU(self.tmp[i-1].out_channels);
def forward(self, x):
n,c,h,w = x.shape
x_feat = self.feature_extracter(x)
# O = self.blob_head.upsample(x, x_8s, x_16s, x_32s)
x_feat = x_feat.abs();
input_size = (x_feat.size(2), x_feat.size(3))
count = F.avg_pool2d(x_feat, kernel_size=input_size, stride=input_size)
count = count.view(count.size(0), -1);
count = self.regressor(count);
shape = [self.n_classes] + list(x_feat.shape[-2:])
x_feat = x_feat.view(x_feat.size(0), x_feat.size(1), -1);
x_feat = x_feat.mul(self.regressor.weight.data.unsqueeze(2));
x_feat = x_feat.sum(1);
x_feat = x_feat.abs();
max_, _ = x_feat.data.max(1);
x_feat.data.div_(max_.unsqueeze(1).expand_as(x_feat));
x_feat = x_feat.reshape(shape)
x_feat = F.interpolate(x_feat[None], size=(h,w), mode="bilinear")
return {"count":count, "cam":x_feat}
def predict(self, batch, predict_method="blobs", proposal_type="sharp"):
self.sanity_checks(batch)
self.eval()
n,c,h,w = batch["images"].shape
O = self(batch["images"].cuda())["cam"]
probs = F.softmax(O, dim=1)
blob_dict = au.probs2blobs(probs)
if predict_method == 'blobs_probs':
blobs = self.get_blobs(O.max(1)[1])
return blobs, probs
if predict_method == 'original':
return {"blobs":blob_dict['blobs'],
"probs":blob_dict['probs'],
"annList":blob_dict['annList'],
"counts":blob_dict['counts']}
head_pointList = blob_dict["pointList"]
if len(head_pointList) == 0:
return {"blobs": np.zeros((h,w), int), "annList":[]}
pred_dict = au.pointList2BestObjectness(head_pointList, batch)
return pred_dict
class LCFCN_Dilated(LCFCN_BO):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
self.feature_extracter = bm.FeatureExtracterDilated()
# self.feature_extracter = bm.FeatureExtracterPyramid()
class Res50FCN(bm.BaseModel):
def __init__(self, train_set):
super().__init__(train_set)
num_classes = train_set.n_classes
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet50_32s = torchvision.models.resnet50(pretrained=True)
resnet_block_expansion_rate = resnet50_32s.layer1[0].expansion
# Create a linear layer -- we don't need logits in this case
resnet50_32s.fc = nn.Sequential()
self.resnet50_32s = resnet50_32s
self.score_32s = nn.Conv2d(512 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
self.score_16s = nn.Conv2d(256 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
self.score_8s = nn.Conv2d(128 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
# # FREEZE BATCH NORMS
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.requires_grad = False
m.bias.requires_grad = False
def forward(self, x):
self.resnet50_32s.eval()
input_spatial_dim = x.size()[2:]
x = self.resnet50_32s.conv1(x)
x = self.resnet50_32s.bn1(x)
x = self.resnet50_32s.relu(x)
x = self.resnet50_32s.maxpool(x)
x = self.resnet50_32s.layer1(x)
x = self.resnet50_32s.layer2(x)
logits_8s = self.score_8s(x)
x = self.resnet50_32s.layer3(x)
logits_16s = self.score_16s(x)
x = self.resnet50_32s.layer4(x)
logits_32s = self.score_32s(x)
logits_16s_spatial_dim = logits_16s.size()[2:]
logits_8s_spatial_dim = logits_8s.size()[2:]
logits_16s += nn.functional.upsample(logits_32s,
size=logits_16s_spatial_dim,
mode="bilinear",
align_corners=True)
logits_8s += nn.functional.upsample(logits_16s,
size=logits_8s_spatial_dim,
mode="bilinear",
align_corners=True)
logits_upsampled = nn.functional.upsample(logits_8s,
size=input_spatial_dim,
mode="bilinear",
align_corners=True)
return logits_upsampled
def get_blobs(self, p_labels, return_counts=False):
from skimage import morphology as morph
p_labels = ms.t2n(p_labels)
n,h,w = p_labels.shape
blobs = np.zeros((n, self.n_classes-1, h, w))
counts = np.zeros((n, self.n_classes-1))
# Binary case
for i in range(n):
for l in np.unique(p_labels[i]):
if l == 0:
continue
                blobs[i,l-1] = morph.label(p_labels[i]==l)
counts[i, l-1] = (np.unique(blobs[i,l-1]) != 0).sum()
blobs = blobs.astype(int)
if return_counts:
return blobs, counts
return blobs
def predict(self, batch, predict_method="blobs", proposal_type="sharp"):
self.sanity_checks(batch)
self.eval()
n,c,h,w = batch["images"].shape
O = self(batch["images"].cuda())
probs = F.softmax(O, dim=1)
blob_dict = au.probs2blobs(probs)
if predict_method == 'blobs_probs':
blobs = self.get_blobs(O.max(1)[1])
return blobs, probs
if predict_method == 'original':
return {"blobs":blob_dict['blobs'],
"probs":blob_dict['probs'],
"annList":blob_dict['annList'],
"counts":blob_dict['counts']}
head_pointList = blob_dict["pointList"]
if len(head_pointList) == 0:
return {"blobs": np.zeros((h,w), int), "annList":[]}
pred_dict = au.pointList2BestObjectness(head_pointList, batch)
return pred_dict
def visualize(self, batch):
pred_dict = self.predict(batch, "blobs")
ms.images(batch["images"], pred_dict["blobs"].astype(int), denorm=1)
# class LCFCN_BO(bm.BaseModel):
# def __init__(self, train_set, **model_options):
# super().__init__(train_set, **model_options)
# self.feature_extracter = bm.FeatureExtracter()
# self.blob_head = bm.Upsampler(self.feature_extracter.expansion_rate, train_set.n_classes)
# # self.pointDict = au.load_LCFCNPoints({"dataset_name":type(train_set).__name__})
# def forward(self, x_input):
# x_8s, x_16s, x_32s = self.feature_extracter.extract_features(x_input)
# blob_mask = self.blob_head.upsample(x_input, x_8s, x_16s, x_32s)
# return blob_mask
class LCFCN(bm.BaseModel):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
self.feature_extracter = bm.FeatureExtracter()
self.blob_head = bm.Upsampler(self.feature_extracter.expansion_rate, train_set.n_classes)
# self.pointDict = au.load_LCFCNPoints({"dataset_name":type(train_set).__name__})
def forward(self, x_input):
x_8s, x_16s, x_32s = self.feature_extracter.extract_features(x_input)
blob_mask = self.blob_head.upsample(x_input, x_8s, x_16s, x_32s)
return blob_mask
class LCFCN_Strong(bm.BaseModel):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
self.feature_extracter = bm.FeatureExtracterStrong()
self.blob_head = bm.UpsamplerStrong(self.feature_extracter.expansion_rate, 21)
# self.pointDict = au.load_LCFCNPoints({"dataset_name":type(train_set).__name__})
def forward(self, x_input):
x_8s, x_16s, x_32s = self.feature_extracter.extract_features(x_input)
blob_mask = self.blob_head.upsample(x_input, x_8s, x_16s, x_32s)
return blob_mask
class LCFCN_BO_Expanded(bm.BaseModel):
def __init__(self, train_set, **model_options):
super().__init__(train_set, **model_options)
num_classes = train_set.n_classes
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet50_32s = torchvision.models.resnet50(pretrained=True)
resnet_block_expansion_rate = resnet50_32s.layer1[0].expansion
# Create a linear layer -- we don't need logits in this case
resnet50_32s.fc = nn.Sequential()
self.resnet50_32s = resnet50_32s
self.score_32s = nn.Conv2d(512 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
self.score_16s = nn.Conv2d(256 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
self.score_8s = nn.Conv2d(128 * resnet_block_expansion_rate,
num_classes,
kernel_size=1)
# # FREEZE BATCH NORMS
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.requires_grad = False
m.bias.requires_grad = False
def forward(self, x):
self.resnet50_32s.eval()
input_spatial_dim = x.size()[2:]
x = self.resnet50_32s.conv1(x)
x = self.resnet50_32s.bn1(x)
x = self.resnet50_32s.relu(x)
x = self.resnet50_32s.maxpool(x)
x = self.resnet50_32s.layer1(x)
x = self.resnet50_32s.layer2(x)
logits_8s = self.score_8s(x)
x = self.resnet50_32s.layer3(x)
logits_16s = self.score_16s(x)
x = self.resnet50_32s.layer4(x)
logits_32s = self.score_32s(x)
logits_16s_spatial_dim = logits_16s.size()[2:]
logits_8s_spatial_dim = logits_8s.size()[2:]
logits_16s += nn.functional.upsample(logits_32s,
size=logits_16s_spatial_dim,
mode="bilinear",
align_corners=True)
logits_8s += nn.functional.upsample(logits_16s,
size=logits_8s_spatial_dim,
mode="bilinear",
align_corners=True)
logits_upsampled = nn.functional.upsample(logits_8s,
size=input_spatial_dim,
mode="bilinear",
align_corners=True)
return logits_upsampled
def get_blobs(self, p_labels, return_counts=False):
from skimage import morphology as morph
p_labels = ms.t2n(p_labels)
n,h,w = p_labels.shape
blobs = np.zeros((n, self.n_classes-1, h, w))
counts = np.zeros((n, self.n_classes-1))
# Binary case
for i in range(n):
for l in np.unique(p_labels[i]):
if l == 0:
continue
                blobs[i,l-1] = morph.label(p_labels[i]==l)
counts[i, l-1] = (np.unique(blobs[i,l-1]) != 0).sum()
blobs = blobs.astype(int)
if return_counts:
return blobs, counts
return blobs
def predict(self, batch, predict_method="blobs", proposal_type="sharp"):
# self.sanity_checks(batch)
self.eval()
n,c,h,w = batch["images"].shape
O = self(batch["images"].cuda())
probs = F.softmax(O, dim=1)
blob_dict = au.probs2blobs(probs)
if predict_method == 'pointList':
return {"pointList":blob_dict["pointList"],
"blobs":blob_dict['blobs'],
"probs":blob_dict["probs"]}
if predict_method == 'blobs_probs':
blobs = self.get_blobs(O.max(1)[1])
return blobs, probs
if predict_method == 'original':
return {"blobs":blob_dict['blobs'],
"probs":blob_dict['probs'],
"annList":blob_dict['annList'],
"counts":blob_dict['counts']}
head_pointList = blob_dict["pointList"]
if len(head_pointList) == 0:
return {"blobs": np.zeros((h,w), int), "annList":[]}
pred_dict = au.pointList2BestObjectness(head_pointList, batch)
return pred_dict
|
StarcoderdataPython
|
4864790
|
import pathlib
import json
import enum
from cvat.apps.engine.models import Task
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from opentpod.object_detector import provider
class Status(enum.Enum):
CREATED = 'created'
TRAINING = 'training'
TRAINED = 'trained'
ERRORED = 'error'
@classmethod
def choices(self):
return tuple((x.value, x.name) for x in self)
def __str__(self):
return self.value
class TrainSet(models.Model):
"""A set of training videos.
"""
name = models.CharField(max_length=256)
owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
created_date = models.DateTimeField(auto_now_add=True)
tasks = models.ManyToManyField(Task)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Detector(models.Model):
"""Trained Detector
"""
name = models.CharField(max_length=256)
owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=32, choices=Status.choices(),
default=str(Status.CREATED), null=True, blank=True)
dnn_type = models.CharField(max_length=32,
choices=provider.DNN_TYPE_DB_CHOICES)
# where this model is finetuned from
parent = models.ForeignKey('self', null=True,
blank=True, on_delete=models.SET_NULL)
train_set = models.ForeignKey(TrainSet, null=True,
on_delete=models.SET_NULL)
train_config = models.CharField(max_length=10000)
# constants
_CONTAINER_NAME_FORMAT = 'opentpod-detector-{}'
class Meta:
ordering = ['id']
def __str__(self):
return '{}-{}'.format(self.pk, self.name)
def get_dir(self):
return pathlib.Path(settings.VAR_DIR) / 'detectors' / str(self.id)
def get_training_data_dir(self):
return self.get_dir() / 'train-data'
def get_model_dir(self):
return self.get_dir() / 'models'
def get_export_file_path(self):
return self.get_dir() / '{}-frozen-graph.zip'.format(self.name)
def get_container_name(self):
return self._CONTAINER_NAME_FORMAT.format(self.id)
def get_train_config(self):
return json.loads(self.train_config)
def get_detector_object(self):
config = self.get_train_config()
config['input_dir'] = self.get_training_data_dir().resolve()
config['output_dir'] = self.get_model_dir().resolve()
detector_class = provider.get(self.dnn_type)
return detector_class(config)
|
StarcoderdataPython
|
9723073
|
<gh_stars>0
import pickle
import click
import holoviews as hv
import hvplot
import hvplot.pandas # noqa
import pandas as pd
from bokeh.io import export_svgs
from .results import load_scores_errs
def export_svg(obj, filename):
plot_state = hv.renderer("bokeh").get_plot(obj).state
plot_state.output_backend = "svg"
export_svgs(plot_state, filename=filename)
pd.options.plotting.backend = "holoviews"
@click.command()
@click.argument("scores")
@click.argument("abilities", type=click.File("rb"))
@click.option("--out", type=click.Path(), required=False, multiple=True)
def main(scores, abilities, out):
abilities = pickle.load(abilities)["abilities"]
df = load_scores_errs(scores, abilities)
df.reset_index(inplace=True)
df_both = df[df["split_mode"] == "both"]
plot_auroc = df_both.hvplot.violin(
y="auroc", by=["strategy", "discrim", "diff"], ylabel="AUROC", rot=90,
)
plot_mcc = df_both.hvplot.violin(
y="mcc", by=["strategy", "discrim", "diff"], ylabel="MCC", rot=90,
)
plot_abs_theta_err = df.hvplot.violin(
y="abs_theta_err",
by=["split_mode", "strategy"],
ylabel="abs theta err",
rot=90,
)
if out:
export_svg(plot_auroc, out[0])
export_svg(plot_mcc, out[1])
export_svg(plot_abs_theta_err, out[2])
# hv.save(plot_auroc, out[0])
# hv.save(plot_abs_theta_err, out[1])
else:
hvplot.show(plot_auroc + plot_abs_theta_err)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1698400
|
from flask import jsonify
import cloudinary
import cloudinary.uploader
import cloudinary.api as cloudAPI
from dotenv import load_dotenv
import os
load_dotenv()
cloud_name = os.environ["CLOUD_NAME"]
cloud_api_key = os.environ["API_KEY"]
cloud_api_secret = os.environ["API_SECRET"]
cloud_folder = os.environ["CLOUD_FOLDER"]
def connectCloudinary():
try:
cloudinary.config(
cloud_name = cloud_name,
api_key = cloud_api_key,
api_secret = cloud_api_secret
)
response = jsonify({"result":200})
        response.status_code = 200
return response
except:
response = jsonify({"result": 500})
        response.status_code = 400
return response
def cloud_save(audioFile):
audioFile = 'uploads/'+audioFile.filename
check_connection = connectCloudinary()
if check_connection.status_code == 200:
try:
save_file = cloudinary.uploader.upload(
audioFile,
resource_type = "video",
folder = "AudioFiles"
)
file_url = save_file['url']
return file_url
except Exception as error:
print(error)
else:
return check_connection
|
StarcoderdataPython
|
6573514
|
<reponame>arfu2016/DuReader
"""
@Project : DuReader
@Module : module_test.py
@Author : Deco [<EMAIL>]
@Created : 5/14/18 1:51 PM
@Desc :
"""
import os
import sys
base_dir = os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))
if base_dir not in sys.path:
sys.path.insert(0, base_dir)
from mysql2.mysql_settings import test, save_data
if __name__ == "__main__":
# test()
save_data()
|
StarcoderdataPython
|
1997314
|
<filename>tools/compile.py
#! /usr/bin/env python
# Copyright (C) 2020 Airbus, <EMAIL>
import sys, os
sys.path.insert(1, os.path.abspath(sys.path[0]+'/..'))
from plasmasm.python.utils import spawn
from tools.step2 import *
from tools.step2_plasmasm import *
from tools.step2_change import *
def usage():
sys.stderr.write("Syntax:\tcompile.py [-v] <step2 definition> <compiler command>\n")
sys.stderr.write("How does it work?\n")
sys.stderr.write("\tcompile.py compiles in three steps\n")
sys.stderr.write("\tStep 1: generation of an intermediate result\n")
sys.stderr.write("\tStep 2: analysis or modification of this result\n")
sys.stderr.write("\tStep 3: generation of the final result\n")
sys.stderr.write("<step2 definition> can be used as shown below:\n")
sys.stderr.write("\t(compiler command) gcc -c foo.c\n")
for cls in Step2MetaClass.registered:
sys.stderr.write("\tcompile.py %s gcc -c foo.c\n" % cls.keyword)
sys.stderr.write("\t\t%s\n" % cls.help)
def parse_options(argv):
if len(argv) == 0:
usage()
sys.exit(0)
# Hack for when gnatmake is used
compiler_options = []
if argv[0] == '-gnatea':
# When using gnatmake --GCC='compile.py [options] gcc-4.9' then gnatmake
# is calling compile.py -gnatea -c -Isrc/ [options] gcc-4.9
# instead of compile.py [options] gcc-4.9 -gnatea -c -Isrc/
# We need to move these compiler options
assert argv[1] == '-c'
compiler_options += argv[:2]
argv[:2] = []
if argv[0].startswith('-I'):
compiler_options += argv[0:1]
argv[0:1] = []
# Step2 has to be defined
if not argv[0].startswith('-'):
usage()
sys.exit(0)
# Remove verbose flag
if len(argv) and argv[0] == '-v':
verbose = True
argv[0:1] = []
else:
verbose = False
if len(argv) == 0:
usage()
sys.exit(0)
# Get step2 definition
step2 = Step2(argv)
    # Put the compiler options at the right position
command = argv[len(step2.param):]
command[0:0] = compiler_options
if len(command) == 0:
usage()
sys.exit(0)
return verbose, step2, command
def remove_M_options(command):
idx = 0
while idx < len(command):
if command[idx] in ('-M', '-MM', '-MG', '-MP', '-MMD'):
command[idx:idx+1] = []
elif command[idx] in ('-MF', '-MT', '-MQ', '-MD'):
command[idx:idx+2] = []
else:
idx += 1
def find_input_idx(command):
for idx in [ _ for _ in range(len(command))
# C
if command[_].endswith('.c')
# Fortran
or command[_].endswith('.f')
or command[_].endswith('.F')
# C++
or command[_].endswith('.cpp')
or command[_].endswith('.cc')
# Assembly
or command[_].endswith('.s')
# Ada
or command[_].endswith('.ada')
or command[_].endswith('.adb')
or command[_].endswith('.tst')
# TODO: if a file name ends with .a, it may be an archive file
# or an Ada source; we will need to look into the file to decide.
]:
return idx
return -1
def find_output_idx(command, input_idx):
# Add implicit output
if not '-o' in command:
if '-c' in command:
input_src = command[input_idx]
name = input_src[:input_src.rindex('.')]
if '/' in name: name = name[name.rindex('/')+1:]
command.extend(['-o', "%s.o"%name])
elif '-S' in command:
input_src = command[input_idx]
name = input_src[:input_src.rindex('.')]
if '/' in name: name = name[name.rindex('/')+1:]
command.extend(['-o', "%s.s"%name])
elif 'mingw' in command[0]:
command.extend(['-o', "a.exe"])
else:
command.extend(['-o', "a.out"])
return command.index('-o') + 1
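# Hedged example (illustrative values) of the two helpers above: find_input_idx
# locates the source file and find_output_idx appends the implicit '-o foo.o'
# when no output is given.
#   >>> cmd = ['gcc', '-c', 'foo.c']
#   >>> find_input_idx(cmd)
#   2
#   >>> find_output_idx(cmd, 2)
#   4
#   >>> cmd
#   ['gcc', '-c', 'foo.c', '-o', 'foo.o']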
if __name__ == "__main__":
verbose, step2, command = parse_options(sys.argv[1:])
if verbose:
print("Compiler command: %s" % command)
print("Step2 definition: %s" % step2.param)
# =====================================================
# Analyze the command
if '-E' in command:
# Preprocessor only, do nothing else
os.execvp(command[0], command)
if step2.obj_input and '-S' in command:
print("Incompatibility between -S and %r; running: %s"
% (step2.param, ' '.join(command)))
os.execvp(command[0], command)
remove_M_options(command)
input_idx = find_input_idx(command)
if input_idx == -1:
print("The file containing the source code has not been found in: %s"
% ' '.join(command))
os.execvp(command[0], command)
out_idx = find_output_idx(command, input_idx)
# =====================================================
# Step 1: generate the intermediate result (asm or object)
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
if verbose:
print("Tmpdir: %s" % tmpdir)
input_src = command[input_idx]
basefile = input_src[input_src.rfind('/')+1:]
basefile = basefile[:basefile.rfind('.')]
step2_base = tmpdir + '/' + basefile + '.new'
if step2.obj_input:
# generate object file
step1_output = tmpdir + '/' + basefile + '.o'
step1_command = [ _ for _ in command if not _.endswith('.a') ]
step1_command[out_idx] = step1_output
if not '-c' in command:
step1_command.insert(1, '-c')
else:
# generate assembly file
step1_output = tmpdir + '/' + basefile + '.s'
if input_src.endswith('.s'):
step1_command = ['cp', input_src, step1_output]
elif '-S' in command:
step1_command = command[:]
step1_command[out_idx] = step1_output
else:
step1_command = [ _ for _ in command if not _.endswith('.a') ]
step1_command[out_idx] = step1_output
if '-c' in command: step1_command[command.index('-c')] = '-S'
else: step1_command.insert(1, '-S')
spawn(step1_command)
if '-gnatea' in step1_command:
# The .ali file is needed by gnatbind
# https://gcc.gnu.org/onlinedocs/gnat_ugn/The-Ada-Library-Information-Files.html
ali_out = command[out_idx][:-2] + '.ali'
if step1_output == tmpdir + '/' + basefile + '.o':
# If the compiler is called with -c, then the .ali file is created
# in the same directory as the .o file.
ali_in = tmpdir + '/' + basefile + '.ali'
elif step1_output == tmpdir + '/' + basefile + '.s':
# If the compiler is called with -S, then the .ali file is created
# in the current directory.
# This is probably a bug of GNAT.
ali_in = basefile + '.ali'
if ali_in != ali_out:
spawn(['mv', ali_in, ali_out])
else:
print(['mv', ali_in, ali_out])
# =====================================================
# Step 2: analysis or modification of the intermediate result
step2_output = step2.run(step1_output, step2_base)
os.unlink(step1_output)
# =====================================================
# Step 3: generation of the final output
result = command[out_idx]
if step2.obj_output and '-c' in command:
spawn(['cp', step2_output, result])
elif '-S' in command:
spawn(['cp', step2_output, result])
else:
command_object = command[:]
command_object[out_idx] = result
command_object[input_idx] = step2_output
if '-x' in command_object:
# Remove -x flags, e.g. -x ada when using GNAT Pro 6.1.2
# because now the input is not Ada anymore
idx = command_object.index('-x')
command_object = command_object[:idx] + command_object[idx+2:]
spawn(command_object)
os.unlink(step2_output)
shutil.rmtree(tmpdir)
|
StarcoderdataPython
|
3469637
|
"""文字列基礎
文字列を数値に変換する方法
上付き数字や下付き数字を数値変換したい場合
[説明ページ]
https://tech.nkhn37.net/python-str-num-translation/#unicodedatadigit
"""
import unicodedata
# Conversion using unicodedata.digit
# Converting superscript/subscript digits
num1 = unicodedata.digit('⁰')
print(num1)
num2 = unicodedata.digit('₁')
print(num2)
|
StarcoderdataPython
|
4969691
|
from selenium import webdriver
# Configure Chrome options
options = webdriver.ChromeOptions()
options.add_argument('--headless')
# Connect to the Selenium Server
driver = webdriver.Remote(
command_executor='http://localhost:4444/wd/hub',
desired_capabilities=options.to_capabilities(),
options=options,
)
# Drive the browser through Selenium
driver.get('https://qiita.com')
print(driver.current_url)
# Quit the browser
driver.quit()
|
StarcoderdataPython
|
3375332
|
import math
from collections.abc import Sequence
import torch
from mmdet.models.builder import HEADS
@HEADS.register_module()
class HeatmapDecodeOneKeypoint():
"""Decodes a heatmap to return a keypoint Only consider the highest
intensity value, does not handle a 2 keypoints case."""
def __init__(self, upscale=4, score_th=-1):
if not isinstance(upscale, Sequence):
upscale = [upscale]
if len(upscale) == 1:
upscale = [upscale[0], upscale[0]]
self.upscale = torch.tensor(upscale)
self.score_th = score_th
def init_weights(self):
pass
def __call__(self, x):
return self.forward(x)
def forward(self, x):
return self._decode_heatmap(x)
def _decode_heatmap(self, output):
coords = self._get_preds(output) # float type
coords = coords.cpu()
confs = torch.zeros_like(coords[:, :, 0])
res = output.size()[2:]
# post-processing
for n in range(coords.size(0)):
for p in range(coords.size(1)):
hm = output[n][p]
px = int(math.floor(coords[n][p][0]))
py = int(math.floor(coords[n][p][1]))
if (px >= 0) and (px < res[1]) and (py >= 0) and (py < res[0]):
                    # clamp x using the heatmap width (res[1]) and y using its height (res[0])
                    px_m, px_p = max(0, px - 1), min(res[1] - 1, px + 1)
                    py_m, py_p = max(0, py - 1), min(res[0] - 1, py + 1)
diff = torch.Tensor([
hm[py][px_p] - hm[py][px_m],
hm[py_p][px] - hm[py_m][px]
])
coords[n][p] += (diff * self.upscale).abs().ceil(
) * diff.sign() / self.upscale
confs[n][p] = hm[py, px]
for c in range(2):
coords[n, p, c] = torch.clamp(coords[n, p, c], 0,
res[c])
preds = coords.clone()
# Transform back
for i in range(coords.size(0)):
preds[i] = self._transform_preds(coords[i])
if preds.dim() < 3:
            preds = preds.view(1, *preds.size())
low_conf = confs < self.score_th
confs[low_conf] = 0.
confs_shape = (n + 1, p + 1, 1)
low_conf = low_conf.reshape(confs_shape).repeat(1, 1, 2)
preds[low_conf] = -1
preds = torch.cat((preds, confs.reshape(confs_shape)), axis=2)
return preds
def _get_preds(self, scores, min_conf=0):
"""get predictions from score maps in torch Tensor."""
assert scores.dim() == 4, 'Score maps should be 4-dim'
maxval, idx = torch.max(
scores.view(scores.size(0), scores.size(1), -1), 2)
maxval = maxval.view(scores.size(0), scores.size(1), 1)
idx = idx.view(scores.size(0), scores.size(1), 1) + 1
preds = idx.repeat(1, 1, 2).float()
preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3)
preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(3))
pred_mask = maxval.gt(min_conf).repeat(1, 1, 2).float()
preds *= pred_mask
return preds
def _transform_preds(self, coords):
coords[:, 0:2] = coords[:, 0:2] * self.upscale
return coords
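# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Decoding a random batch of heatmaps; the shapes (batch=2, 1 keypoint, 64x64)
# and the upscale factor are assumptions.
if __name__ == "__main__":
    decoder = HeatmapDecodeOneKeypoint(upscale=4)
    heatmaps = torch.rand(2, 1, 64, 64)  # (N, num_keypoints, H, W)
    keypoints = decoder(heatmaps)        # (N, num_keypoints, 3): x, y, confidence
    print(keypoints.shape)               # torch.Size([2, 1, 3])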
|
StarcoderdataPython
|
6619815
|
#!/home/amarchal/py2env/bin/python
'''This program builds synthetic observations (21 cm line) from T, n and vz, which are the
three-dimensional fields of the numerical simulation, based on the work of Saury et al. 2014'''
import numpy as np
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units
from astropy import constants as const
from scipy import ndimage
import scipy.integrate as integrate
import FITS_tools
plt.ion()
plot = False
cm = plt.get_cmap('viridis')
cm.set_bad(color='black')
imkw = dict(origin='lower', interpolation='none', cmap=cm)
def I_Tb(params):
(u, vz, n_Delta, T, C, Delta2, dz) = params
dI = n_Delta * np.exp(- (u - (vz))**2 / (2.*Delta2))
dI[np.where(dI != dI)] = 0.
I = 1./(C * np.sqrt(2.*np.pi)) * integrate.simps(dI, dx=dz, axis=0)
return I
# Constant
m_h = 1.6737236e-27 #kg
C = 1.82243e18 #K-1cm-2 / (km.s-1)
pc2cm = units.pc.to(units.m) * 1.e2
box_size = 40. # pc
resolution = 1024.
dz = (box_size / resolution) * pc2cm
# Open data
path_simu = '/data/amarchal/ROHSA_paper/data/Saury2014/'
path_out = '/data/amarchal/ROHSA_paper/data/synthetic_obs/'
hdu_list_rho = fits.open(path_simu + 'rho_016_subgrid_256.fits')
hdu_list_T = fits.open(path_simu + 'T_016_subgrid_256.fits')
hdu_list_vz = fits.open(path_simu + 'vz_016_subgrid_256.fits')
reso = 0.8 #km.s-1
rho_cube = hdu_list_rho[0].data #g.cm-3
T_cube = hdu_list_T[0].data
vz_cube = hdu_list_vz[0].data * 1.e-5  # km.s-1 (note: the factor 1.e-5 converts from cm.s-1)
## CUT TEMPERATURE
Tk_lim_inf = 0
Tk_lim_sup = np.inf
idx_phase = np.where((T_cube > Tk_lim_inf) & (T_cube < Tk_lim_sup))
rho_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
T_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
vz_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
rho_cube_phase[idx_phase] = rho_cube[idx_phase]
T_cube_phase[idx_phase] = T_cube[idx_phase]
vz_cube_phase[idx_phase] = vz_cube[idx_phase]
##
# Preliminary calculation
Delta2 = ((const.k_B.value * T_cube_phase / m_h)) * 1.e-6 #km.s-1
n = rho_cube_phase/(m_h*1.e3)
n_Delta = n / np.sqrt(Delta2)
# Spectral range
u = np.arange(-40,40+reso, reso)
map_u = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in np.arange(T_cube_phase.shape[1]):
for j in np.arange(T_cube_phase.shape[2]):
map_u[:,i,j] = u
Tb = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
Tb_thin = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_in_front = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in tqdm(range(T_cube_phase.shape[0])):
Tb_z = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_z = 1. / (C * np.sqrt(2.*np.pi)) * n_Delta[i] / T_cube_phase[i] * np.exp(- (map_u - (vz_cube_phase[i]))**2 / (2.*Delta2[i])) * dz
idx_nonzero = ~np.isnan(tau_z[0])
Tb_z[:,idx_nonzero] = T_cube_phase[i,idx_nonzero] * (1. - np.exp(-1.*tau_z[:,idx_nonzero])) * np.exp(-1.*tau_in_front[:,idx_nonzero])
tau_in_front[:,idx_nonzero] += tau_z[:,idx_nonzero]
Tb += Tb_z
Tb_thin[:,idx_nonzero] += tau_z[:,idx_nonzero] * T_cube_phase[i,idx_nonzero]
# Tb_thin_fast = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# for i in tqdm(range(len(u))):
# Tb_thin_fast[i] = I_Tb((u[i], vz_cube_phase, n_Delta, T_cube_phase, C, Delta2, dz))
fileout = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA.fits'
fileout_thin = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA_thin.fits'
# Write PPV cube
hdu0 = fits.PrimaryHDU(Tb)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb.shape[1]
hdu0.header['NAXIS2'] = Tb.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout, overwrite=True)
# Write PPV cube thin limit
hdu0 = fits.PrimaryHDU(Tb_thin)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb_thin.shape[1]
hdu0.header['NAXIS2'] = Tb_thin.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout_thin, overwrite=True)
|
StarcoderdataPython
|
1957914
|
<gh_stars>1-10
# import json
# from server.helpers import encap_str
import re
from server.types import PageLanguage
from server.types import PageOperation
# from server.types import ResponseOperation
from server.types import PageParameters
from server.list import List
from server.question import Question
# from server.stats import Stats
from server.stat_charts import prepare_user_stats_chart
import server.helpers as helpers
import server.context as context
from server.timers import timer_section
from server.helpers import Transliterate
import logging
USE_GOOGLE_AUTH = True
class Design_default(object):
total_questions = 5
@staticmethod
def _get_color(year):
color_list = ["#ff6956", "#489cba", "#f7b500", "#6ab288"]
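        # e.g. _get_color(1) == _get_color(5) == "#ff6956": the palette cycles every four years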
return color_list[(year - 1) % len(color_list)]
@staticmethod
@timer_section("render_main_page")
def render_main_page(page):
# If login, update user and replace op with the original op
if page.page_params.get_param("op") == PageOperation.LOGIN_ANON:
new_url = page.login_anon()
context.c.headers.redirect(new_url)
return ""
if page.page_params.get_param("op") == PageOperation.LOGIN_GOOGLE:
new_url, ok = page.login_google()
context.c.headers.set_content_type('text/plain')
return "OK:{}".format(new_url) if ok else "ERROR:{}".format(new_url)
if page.page_params.get_param("op") == PageOperation.LOGOUT:
new_url = page.logout()
context.c.headers.redirect(new_url)
return ""
if not context.c.user:
# First login, if not already done
page.page_params.set_param("op", PageOperation.MENU_USER)
user = context.c.user
# If we happen to get to too many questions (e.g. reloading or returning to a test from elsewhere)
# here we check that we didn't reach the end counter, and if we did, redirect to summary
if (page.page_params.get_param("op") == PageOperation.TEST or \
page.page_params.get_param("op") == PageOperation.TEST_PREV) and \
context.c.session.get("history") and len(context.c.session.get("history")) >= Design_default.total_questions:
page.page_params.set_param("op", PageOperation.SUMMARY)
#Design_default.add_header(page)
#Design_default.add_background(page)
if page.page_params.get_param("op") == PageOperation.TEST or \
page.page_params.get_param("op") == PageOperation.TEST_PREV:
# Tests
logging.debug("PageOperation.TEST - {}".format(page.page_params.get_param("root")))
Design_default.render_test_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.SUMMARY:
# Last page
logging.debug("PageOperation.SUMMARY")
Design_default.render_summary_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.CONFIRM_ANON:
# Confirm anonymous login
logging.debug("PageOperation.CONFIRM_ANON")
Design_default.render_confirm_anon_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.TEST_INTRO:
# Test intro
logging.debug("PageOperation.TEST_INTRO")
context.c.session.set("showed_test_intro", True)
Design_default.render_select_get_test_started_page(page)
return page.render()
if page.page_params.get_param("op") == PageOperation.BROWSE:
# Browse
logging.debug("PageOperation.BROWSE - {}".format(page.page_params.get_param("root")))
Design_default.render_browse_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.BROWSE_INTRO:
# Browse intro
logging.debug("PageOperation.BROWSE_INTRO")
context.c.session.set("showed_browse_intro", True)
Design_default.render_select_get_browse_started_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.MENU_USER and not context.c.user:
# If user is already logged in, we cannot do login again
logging.debug("PageOperation.MENU - select user")
Design_default.render_select_user_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.MENU_YEAR or \
page.page_params.get_param("op") == PageOperation.DEFAULT:
# No year selected, select it
logging.debug("PageOperation.MENU - year")
Design_default.render_select_year_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.MENU_THEME:
# No theme selected, select it
logging.debug("PageOperation.MENU - theme")
Design_default.render_select_theme_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.STATS:
if user and user.user_id:
# Full user id (e.g. google:XXXX)
u_id = user.user_id
# TBD: old notation for local tests - remove "local:" if present
if len(u_id) >= len("local:") and u_id[:len("local:")] == "local:":
u_id = u_id[len("local:"):]
Design_default.render_user_stats(page, u_id)
else:
logging.info("PageOperation.STATS - no user - select user")
page.page_params.set_param("op", PageOperation.MENU_USER)
                Design_default.render_select_user_page(page)
return page.render()
elif page.page_params.get_param("op") == PageOperation.ABOUT:
Design_default.render_about(page)
return page.render()
else:
            # Something messed up the state - clean up the state and go to the intro
# We could get PageOperation.MENU_USER with a wrong bookmark, so don't log
if page.page_params.get_param("op") != PageOperation.MENU_USER:
logging.error("PageOperation.MENU - wrong parameters - select year: {}".format(page.page_params.get_param("op")))
page.page_params.set_param("op", PageOperation.MENU_YEAR)
Design_default.render_select_year_page(page)
return page.render()
@staticmethod
@timer_section("add_menu")
def add_menu(page):
content = page.repository.get_content(PageLanguage.toStr(page.page_params.get_param("language")))
page.template_params['menu'] = []
#page.page_params.print_params()
current_lang = page.page_params.get_param("language")
new_page_params = PageParameters()
new_page_params.set_param("root", page.page_params.get_param("root"))
new_page_params.set_param("op", PageOperation.MENU_YEAR)
new_page_params.set_param("language", page.page_params.get_param("language"))
user_picture = context.c.session.get("user_picture")
if not user_picture is None:
page.template_params["user_picture"] = user_picture
page.template_params["user_name"] = context.c.session.get("user_name")
sublang = []
for lang in page.get_language_list():
sublang.append({
"name" : "<input type=\"image\" style=\"padding: 0px\" width=\"27px\" "
"height=\"15px\" alt=\"" + page.get_language_details(lang)["country"] +
"\" src=\"" + page.get_file_url("images/" +
page.get_language_details(lang)["image"]) + "\"> " +
page.get_language_details(lang)["country"],
"link" : page.page_params.create_url( \
op = PageOperation.MENU_YEAR,
beta = True if page.page_params.get_param("beta") else None,
language = lang)
})
        # Language:
lang = {
"name" : "<input type=\"image\" style=\"padding: 0px\" width=\"27px\" "
"height=\"15px\" alt=\"" + page.get_language_details()["country"] +
"\" src=\"" + page.get_file_url("images/" +
page.get_language_details()["image"]) + "\">",
"submenu" : {
"id" : "lang",
"options" : sublang
}
}
page.template_params['menu'].append(lang)
menu_id = 0
lists = {
"name" : page.get_messages()["year"].upper(),
"link" : new_page_params.create_url(\
op = PageOperation.MENU_YEAR, \
language = PageLanguage.toStr(page.page_params.get_param("language")), \
beta = True if page.page_params.get_param("beta") else None),
"submenu" : {
"id" : "zadaci_{}".format(menu_id),
"options" : []
}
}
menu_id = menu_id + 1
for level in sorted(content.keys()):
# options = []
# for theme in sorted(content[level].keys()):
# if not theme == "level_short":
# options.append({
# "name" : theme.upper(),
# "link" : new_page_params.create_url(\
# op = PageOperation.toStr(PageOperation.TEST_INTRO), \
# year = level, \
# theme = theme, \
# subtheme = "*", \
# topic = "*", \
# period = "*", \
# difficulty = "*", \
# l_id = content[level][theme]["name"], \
# js = False)
# })
            # Years 7+ are not complete, so we only show them in beta mode
# I had an error in logs, not sure why (some inconsistency in input data):
# File "./server/design_default.py", line 238, in add_menu
# if int(level) <= 4 or page.page_params.get_param("beta"):
# ValueError: invalid literal for int() with base 10: 'first'
# so added exception handling for ValueError
try:
if int(level) <= 6 or page.page_params.get_param("beta"):
lists['submenu']['options'].append({
"name" : level.upper(),
"link" : new_page_params.create_url( \
op = PageOperation.MENU_THEME, \
language = PageLanguage.toStr(page.page_params.get_param("language")), \
year = level,
beta = True if page.page_params.get_param("beta") else None)
# js = False),
# "submenu" : {
# "id" : "zadaci_{}".format(menu_id),
# "options" : options
# }
})
menu_id = menu_id + 1
except ValueError:
pass
page.template_params['menu'].append(lists)
# # Do not show results to an anonymous user
# if not user_picture is None:
page.template_params['menu'].append({
"name" : page.get_messages()["results"].upper(),
"link" : new_page_params.create_url(
op = PageOperation.STATS,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
beta = True if page.page_params.get_param("beta") else None
)
})
# "name" : "<NAME> (" + context.c.user.name + ")",
page.template_params['menu'].append({
"name" : page.get_messages()["about"].upper(),
"link" : new_page_params.create_url(
op = PageOperation.ABOUT,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
beta = True if page.page_params.get_param("beta") else None
)
})
# "name" : "Izloguj se (" + context.c.user.name + ")",
page.template_params['menu'].append({
"name" : page.get_messages()["logout"].upper(),
"link" : new_page_params.create_url(
op = PageOperation.LOGOUT,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
beta = True if page.page_params.get_param("beta") else None
)
})
@staticmethod
def _add_language(page, s):
return page.page_params.get_param("language").value + "/" + s
@staticmethod
@timer_section("render_select_user_page")
def render_select_user_page(page):
page.page_params.delete_history()
page.page_params.set_param("year", "")
page.page_params.set_param("theme", "")
page.page_params.set_param("subtheme", "")
page.page_params.set_param("topic", "")
page.page_params.set_param("q_id", "")
page.page_params.set_param("l_id", "")
page.template_params["template_name"] = Design_default._add_language(page, "user.html.j2")
page.template_params["h1"] = "Login"
page.template_params['current_lang'] = \
"<input type=\"image\" style=\"padding: 0px\" width=\"27px\" " \
"height=\"15px\" alt=\"" + page.get_language_details()["country"] + \
"\" src=\"" + page.get_file_url("images/" + \
page.get_language_details()["image"]) + "\">"
sublang = []
for lang in page.get_language_list():
sublang.append({
"name" : "<input type=\"image\" style=\"padding: 0px\" width=\"27px\" "
"height=\"15px\" alt=\"" + page.get_language_details(lang)["country"] +
"\" src=\"" + page.get_file_url("images/" +
page.get_language_details(lang)["image"]) + "\"> " +
page.get_language_details(lang)["country"],
"link" : page.page_params.create_url(
op = PageOperation.MENU_USER,
language = lang,
beta = True if page.page_params.get_param("beta") else None
)
})
page.template_params['menu_lang'] = sublang
root = page.page_params.get_param("root")
#page.template_params["google_link"] = "{}?op={}".format(
# root,
# PageOperation['LOGIN_GOOGLE'].value
#)
page.template_params["google_link"] = page.page_params.create_url(
op = PageOperation.LOGIN_GOOGLE,
beta = True if page.page_params.get_param("beta") else None)
page.template_params["guest_link"] = page.page_params.create_url(
op = PageOperation.LOGIN_ANON,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
beta = True if page.page_params.get_param("beta") else None)
@staticmethod
@timer_section("render_select_year_page")
def render_select_year_page(page):
page.page_params.delete_history()
page.page_params.set_param("year", "")
page.page_params.set_param("theme", "")
page.page_params.set_param("subtheme", "")
page.page_params.set_param("topic", "")
page.page_params.set_param("q_id", "")
page.page_params.set_param("l_id", "")
page.template_params["template_name"] = Design_default._add_language(page, "year.html.j2")
page.template_params["h1"] = "Izaberi godinu"
content = page.repository.get_content(PageLanguage.toStr(page.page_params.get_param("language")))
if content :
#ordered = collections.OrderedDict()
ordered = {}
for year in content.keys():
ordered[content[year]["level_short"]] = year
scale = 1
width = int(137 * scale)
height = int(140 * scale)
font_size = int(111 * scale)
# margin = int(10 * scale)
page.template_params['button'] = {
'width' : '{}px'.format(width),
'height' : '{}px'.format(height),
'font_size' : '{}px'.format(font_size),
'choices' : []
}
# Create dictionary entries that define menu
Design_default.add_menu(page)
i = 0
for ynumber in sorted(ordered.keys()):
                # Years 7+ are not complete, so we only show them in beta mode
if int(ordered[ynumber]) <= 6 or page.page_params.get_param("beta"):
year = ordered[ynumber]
page.template_params['button']['choices'].append({
'title' : ynumber,
'front_color' : Design_default._get_color(i+1),
#'back_color' : '#f9f9f9',
'back_color' : '#ffffff',
'link' : page.page_params.create_url(
op = PageOperation.MENU_THEME,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
year = year, \
theme = "", \
subtheme = "", \
topic = "", \
period = "", \
difficulty = "", \
beta = True if page.page_params.get_param("beta") else None)
})
i = i+1
else:
page.template_params["template_name"] = Design_default._add_language(page, "error.html.j2")
page.template_params["error_msg"] = "No content"
@staticmethod
def _next_theme_test_url(page, theme, subtheme, topic, period, difficulty, l_id):
# Only show intro once per login
if context.c.session.get("showed_test_intro"):
page.page_params.set_param("theme", theme)
page.page_params.set_param("subtheme", subtheme)
page.page_params.set_param("topic", topic)
page.page_params.set_param("period", period)
page.page_params.set_param("difficulty", difficulty)
page.page_params.set_param("l_id", l_id)
test = List(page)
url_next, url_skip = test.get_next_question_test_url(Design_default.total_questions)
else:
url_next = page.page_params.create_url(
op = PageOperation.TEST_INTRO,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
theme = theme,
subtheme = subtheme,
topic = topic,
period = period,
difficulty = difficulty,
beta = True if page.page_params.get_param("beta") else None,
l_id = l_id)
return url_next
@staticmethod
def _next_theme_browse_url(page, theme, subtheme, topic, period, difficulty, l_id):
# Only show intro once per login
if context.c.session.get("showed_browse_intro"):
page.page_params.set_param("theme", theme)
page.page_params.set_param("subtheme", subtheme)
page.page_params.set_param("topic", topic)
page.page_params.set_param("period", period)
page.page_params.set_param("difficulty", difficulty)
page.page_params.set_param("l_id", l_id)
test = List(page)
_, url_next = test.get_prev_next_questions_browse_url()
else:
url_next = page.page_params.create_url(
op = PageOperation.BROWSE_INTRO,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
theme = theme,
subtheme = subtheme,
topic = topic,
period = period,
difficulty = difficulty,
beta = True if page.page_params.get_param("beta") else None,
l_id = l_id)
return url_next
# BROWSE
@staticmethod
def _next_theme_url(page, theme, subtheme, topic, period, difficulty, l_id):
return Design_default._next_theme_browse_url(page, theme, subtheme, topic, period, difficulty, l_id)
@staticmethod
@timer_section("render_select_theme_page")
def render_select_theme_page(page):
page.page_params.delete_history()
page.page_params.set_param("theme", "")
page.page_params.set_param("subtheme", "")
page.page_params.set_param("topic", "")
page.page_params.set_param("q_id", "")
page.page_params.set_param("l_id", "")
# Create dictionary entries that define menu
Design_default.add_menu(page)
lang = page.page_params.get_param("language")
content = page.repository.get_content(PageLanguage.toStr(lang))
icon_cnt = 0
if content and page.page_params.get_param("year") in content.keys():
page.template_params["template_name"] = Design_default._add_language(page, "theme.html.j2")
page.template_params['year'] = page.page_params.get_param("year").upper().strip()
try:
int_year = int(page.page_params.get_param("year"))
            except (TypeError, ValueError):
                int_year = 1
page.template_params['year_color'] = Design_default._get_color(int_year)
page.template_params['url_year'] = page.page_params.create_url(
language = PageLanguage.toStr(page.page_params.get_param("language")), \
op = PageOperation.MENU_YEAR,
beta = True if page.page_params.get_param("beta") else None)
page.template_params['themes'] = []
page.template_params["h1"] = page.template_params['year']
page.template_params["h2"] = "Izaberi temu"
# page.add_lines("<div style='width: auto ;margin-left: auto ;margin-right: auto ;'>\n")
# page.add_lines("<h3> {} razred - izaberi oblast</h3>\n".format(page.page_params.get_param("year").title()))
# page.add_lines("</div>\n")
# Sort according to the assigned rank
sorted_themes = []
for theme in sorted(content[page.page_params.get_param("year")].keys()):
if not theme == "level_short":
sorted_themes.append([theme, content[page.page_params.get_param("year")][theme]["rank"]])
sorted_themes.sort(key=lambda x:x[1])
sorted_themes = [x[0] for x in sorted_themes]
#for theme in sorted(content[page.page_params.get_param("year")].keys()):
for theme in sorted_themes:
subtheme_list = []
subtheme_dict = dict()
                # Special provisioning for Serbian Cyrillic
if lang == PageLanguage.RSC:
theme_o = Transliterate.rs(theme)
else:
theme_o = theme
for subclass in sorted(content[page.page_params.get_param("year")][theme].keys()):
if not subclass == "name" and not subclass == "rank":
subtheme = content[page.page_params.get_param("year")][theme][subclass]["subtheme"].strip()
topic = content[page.page_params.get_param("year")][theme][subclass]["topic"].strip()
                        # Special provisioning for Serbian Cyrillic
if lang == PageLanguage.RSC:
subtheme_o = Transliterate.rs(subtheme)
topic_o = Transliterate.rs(topic)
else:
subtheme_o = subtheme
topic_o = topic
rank_subtheme = content[page.page_params.get_param("year")][theme][subclass]["rank_subtheme"].strip()
rank_topic = content[page.page_params.get_param("year")][theme][subclass]["rank_topic"].strip()
period = content[page.page_params.get_param("year")][theme][subclass]["period"]
if subtheme not in subtheme_dict.keys():
icon_svg = page.repository.get_icon_svg(PageLanguage.toStr(lang), subtheme)
# Different SVGs can have same path IDs (e.g. created in the same program)
# So we change names here
#icon_svg = re.sub(r'id="(.*?)"', 'id="\\1_R_{}"'.format(icon_cnt), icon_svg)
icon_svg = re.sub(r'cls-(.)', 'cld-\\1_R_{}'.format(icon_cnt), icon_svg)
icon_cnt = icon_cnt + 1
subtheme_d = {
'title' : subtheme_o.capitalize(),
'icon' : icon_svg,
'rank_subtheme' : rank_subtheme,
'topics' : [],
'topics_dir' : {},
'min_period' : period,
'link' : Design_default._next_theme_url(
page = page,
theme = theme.title().strip(),
subtheme = subtheme,
topic = "*",
period = "*",
difficulty = "*",
l_id = content[page.page_params.get_param("year")][theme]["name"])
}
subtheme_dict[subtheme] = subtheme_d
subtheme_list.append(subtheme_d)
# BROWSE
link = Design_default._next_theme_test_url(
page = page,
theme = theme.title().strip(),
subtheme = subtheme,
topic = "*",
period = "*",
difficulty = "*",
l_id = content[page.page_params.get_param("year")][theme]["name"])
topic_d = {
                            # Special provisioning for Serbian Cyrillic
'title' : "Sve teme" if not lang == PageLanguage.RSC else "Све теме",
'rank_topic' : "0",
'min_period' : "0",
'link' : link
}
# BROWSE
topic_d['rank_topic'] = "9999"
topic_d['title'] = page.get_messages()['test']
topic_d['color'] = Design_default._get_color(int_year)
topic_d['font-weight'] = 'bolder'
topic_d['font-size'] = '12px'
subtheme_d['topics_dir']["all"] = topic_d
subtheme_d['topics'].append(topic_d)
else:
subtheme_d = subtheme_dict[subtheme]
if topic not in subtheme_d['topics_dir'].keys():
topic_d = {
'title' : topic_o.capitalize(),
'rank_topic' : rank_topic,
'min_period' : period,
'link' : Design_default._next_theme_url(
page = page,
theme = theme.title().strip(),
subtheme = subtheme,
topic = topic,
period = "*",
difficulty = "*",
l_id = content[page.page_params.get_param("year")][theme]["name"])
}
subtheme_d['topics_dir'][topic] = topic_d
subtheme_d['topics'].append(topic_d)
else:
topic_d = subtheme_d['topics_dir'][topic]
subtheme_d['min_period'] = period if period < subtheme_d['min_period'] else subtheme_d['min_period']
topic_d['min_period'] = period if period < topic_d['min_period'] else topic_d['min_period']
# Sort first by period and then alphabetically
#subtheme_list.sort(key=lambda x:x['min_period'] + x['title'])
subtheme_list.sort(key=lambda x:x['rank_subtheme'] + x['title'])
#logging.debug("THEME {}: \n{}\n\n".format(
# theme, [[x['title'], x['min_period']] for x in subtheme_list] ))
for st in subtheme_list:
#st['topics'].sort(key=lambda x:x['min_period'] + x['title'])
st['topics'].sort(key=lambda x:x['rank_topic'] + x['title'])
#logging.debug("SUBTHEME {}: \n{}\n\n".format(
# st['title'], [[x['title'], x['min_period']] for x in st['topics']] ))
page.template_params['themes'].append({
'title' : theme_o.capitalize().strip(),
'link' : Design_default._next_theme_url(
page = page,
theme = theme.title().strip(), \
subtheme = "*", \
topic = "*", \
period = "*", \
difficulty = "*", \
l_id = content[page.page_params.get_param("year")][theme]["name"]),
'subthemes' : subtheme_list
})
else:
page.template_params["template_name"] = Design_default._add_language(page, "error.html.j2")
if not page.page_params.get_param("year") in content.keys():
page.template_params["error_msg"] = "No year {} in content".format(page.page_params.get_param("year"))
else:
page.template_params["error_msg"] = "No content"
@staticmethod
@timer_section("render_confirm_anon_page")
def render_confirm_anon_page(page):
page.template_params["template_name"] = Design_default._add_language(page, "confirm_anon.html.j2")
page.template_params["next"] = page.page_params.create_url(\
op = PageOperation.MENU_YEAR,
language = PageLanguage.toStr(page.page_params.get_param("language")),
beta = True if page.page_params.get_param("beta") else None
)
page.template_params["back"] = page.page_params.create_url(\
op = PageOperation.LOGOUT,
language = PageLanguage.toStr(page.page_params.get_param("language")),
beta = True if page.page_params.get_param("beta") else None
)
@staticmethod
@timer_section("render_select_get_test_started_page")
def render_select_get_test_started_page(page):
page.page_params.delete_history()
page.page_params.set_param("q_id", "")
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "test_intro.html.j2")
page.template_params["year"] = page.page_params.get_param("year").title()
page.template_params["theme"] = page.page_params.get_param("theme").title()
page.template_params["subtheme"] = page.page_params.get_param("subtheme").title()
page.template_params["topic"] = page.page_params.get_param("topic").title()
page.template_params["period"] = context.c.session.get("period").title()
page.template_params["difficulty"] = context.c.session.get("difficulty").title()
page.template_params["h1"] = page.template_params['year']
page.template_params["h2"] = page.template_params["theme"]
page.template_params["h3"] = page.template_params["topic"]
page.template_params["h4"] = "Start test"
if context.c.session.get("beta"):
page.template_params["beta"] = True
else:
page.template_params["beta"] = False
test = List(page)
url_next, url_skip = test.get_next_question_test_url(Design_default.total_questions)
page.template_params["next"] = url_next
page.template_params["skip"] = url_skip
page.template_params["back"] = page.page_params.create_url(\
op = PageOperation.MENU_THEME,
language = PageLanguage.toStr(page.page_params.get_param("language")),
subtheme = "",
topic = "",
period = "",
difficulty = "",
l_id = "",
beta = True if page.page_params.get_param("beta") else None
)
@staticmethod
def _render_result_bar_and_get_last_difficulty(page):
difficulty = "0"
page.template_params["total_bar"] = {"star1": 0, "star2": 0, "star3": 0, "missed": 0}
page.template_params["type_bar"] = {"star1": 0, "star2": 0, "star3": 0, "missed": 0}
        # Count only the best answer to each question
questions = {}
if context.c.session.get("history"):
for r in context.c.session.get("history"):
if "difficulty" in r.keys() and (r["difficulty"] == "1" or r["difficulty"] == "2" or r["difficulty"] == "3"):
difficulty = r["difficulty"]
if not r["q_id"] in questions.keys():
questions[r["q_id"]] = r
else:
if r["incorrect"] < questions[r["q_id"]]["incorrect"]:
questions[r["q_id"]]["incorrect"] = r["incorrect"]
questions[r["q_id"]]["correct"] = r["correct"]
# A question is correct if all subquestions are correct
for k, r in questions.items():
if r["difficulty"] == "1":
if r["incorrect"] == 0:
page.template_params["type_bar"]["star1"] = page.template_params["type_bar"]["star1"] + 1
else:
page.template_params["type_bar"]["missed"] = page.template_params["type_bar"]["missed"] + 1
page.template_params["total_bar"]["star1"] = page.template_params["total_bar"]["star1"] + r["correct"]
page.template_params["total_bar"]["missed"] = page.template_params["total_bar"]["missed"] + r["incorrect"]
elif r["difficulty"] == "2":
if r["incorrect"] == 0:
page.template_params["type_bar"]["star2"] = page.template_params["type_bar"]["star2"] + 1
else:
page.template_params["type_bar"]["missed"] = page.template_params["type_bar"]["missed"] + 1
page.template_params["total_bar"]["star2"] = page.template_params["total_bar"]["star2"] + r["correct"]
page.template_params["total_bar"]["missed"] = page.template_params["total_bar"]["missed"] + r["incorrect"]
elif r["difficulty"] == "3":
if r["incorrect"] == 0:
page.template_params["type_bar"]["star3"] = page.template_params["type_bar"]["star3"] + 1
else:
page.template_params["type_bar"]["missed"] = page.template_params["type_bar"]["missed"] + 1
page.template_params["total_bar"]["star3"] = page.template_params["total_bar"]["star3"] + r["correct"]
page.template_params["total_bar"]["missed"] = page.template_params["total_bar"]["missed"] + r["incorrect"]
return difficulty
@staticmethod
# Store page parameters into session
# Used when registering results in order to get good statistics of years/topics/themes/etc
def _store_last_question_into_session(page):
context.c.session.set("last_q_year", page.page_params.get_param("year"))
context.c.session.set("last_q_theme", page.page_params.get_param("theme"))
context.c.session.set("last_q_subtheme", page.page_params.get_param("subtheme"))
context.c.session.set("last_q_topic", page.page_params.get_param("topic"))
@staticmethod
@timer_section("render_test_page")
def render_test_page(page):
test = List(page)
Design_default._store_last_question_into_session(page)
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "test.html.j2")
q_id = page.page_params.get_param("q_id")
q_number = context.c.session.get("q_num")
try:
q_number = int(q_number) if q_number else 0
except ValueError as ex:
logging.error("Incorrect q_num={}\n{}".format(q_number, helpers.get_stack_trace()))
q_number = 0
skipped = page.page_params.get_param("skipped")
if skipped and isinstance(skipped, str) and skipped.lower() == "true":
try:
context.c.session.get("history")[-1]["skipped"] = True
except:
            # I believe this happens when a link with the "skipped" parameter is bookmarked,
# so we don't want alerts on this one.
logging.debug("Cannot mark last question as skipped\nhist={}\n{}".format(
context.c.session.get("history"), helpers.get_stack_trace()
))
hist = None
if page.page_params.get_param("op") == PageOperation.TEST_PREV or q_number == test.get_q_number() - 1:
context.c.session.list_delete("history", -1)
# At this point current q_id should match the last one in history,
# otherwise there was an error creating TEST_PREV link
if not q_id == context.c.session.get("history")[-1]["q_id"]:
logging.error("Error getting back in questions: q_id={}, q_num={}\nHist={}\n{}".format(
q_id, q_number, context.c.session.get("history"), helpers.get_stack_trace()
))
else:
if q_number == test.get_q_number() + 1:
# New question - add to history
                # A priori set to incorrect so that we have matching history on skip
hist = {
"q_id" : q_id,
"url" : page.page_params.get_url(),
"correct" : 0,
"incorrect" : 1
}
#hist.update(next_question)
context.c.session.list_append("history", hist)
elif q_number > 0 and q_number == test.get_q_number():
# The same question - probably a refresh
# Happens too often, not sure why (refreshes, clicks on history?) so just ignoring
if not q_id == context.c.session.get("history")[-1]["q_id"]:
logging.debug("Error in history: q_id={}, q_num={}\nHist={}\n{}".format(
q_id, q_number, context.c.session.get("history"), helpers.get_stack_trace()
))
else:
# This happens too often - for now just correct the number
# Likely wrong
logging.debug("Error in question numbering: q_id={}, q_num={}/{}\nHist={}\n{}".format(
q_id, q_number, test.get_q_number(), context.c.session.get("history"), helpers.get_stack_trace()
))
q_number = test.get_q_number()
page.page_params.set_param("q_num", q_number)
# DEBUG
# context.c.session.print()
test_id = context.c.session.get("test_id")
q = Question(page=page, q_id=q_id, test_id=test_id, test_order=q_number)
q.set_from_file_with_exception()
q.eval_with_exception()
url_next, url_skip = test.get_next_question_test_url(Design_default.total_questions)
page.template_params["next"] = url_next
page.template_params["skip"] = url_skip
#url_prev = test.get_prev_question_test_url()
#page.template_params["back"] = url_prev
Design_default._render_result_bar_and_get_last_difficulty(page)
all_questions = page.repository.get_content_questions(
PageLanguage.toStr(page.page_params.get_param("language")),
page.page_params.get_param("year"),
page.page_params.get_param("theme"))
if page.page_params.get_param("q_id") in all_questions.keys():
difficulty = all_questions[page.page_params.get_param("q_id")]["difficulty"]
else:
difficulty = None
if hist and page.page_params.get_param("q_id") in all_questions.keys():
hist["difficulty"] = difficulty
page.template_params["q_number"] = str(q_number)
page.template_params["debug"] = "DEBUG: {} / {} / {} / {} - {}".format(
page.page_params.get_param("subtheme"),
page.page_params.get_param("topic"),
context.c.session.get("period"),
difficulty,
page.page_params.get_param("q_id")
)
page.template_params["root"] = page.page_params.get_param("root")
page.template_params["q_id"] = page.page_params.get_param("q_id")
page.template_params["l_id"] = page.page_params.get_param("l_id")
page.template_params["language"] = PageLanguage.toStr(page.page_params.get_param("language"))
page.template_params["year"] = page.page_params.get_param("year").upper()
if page.page_params.get_param("language") == PageLanguage.RSC:
page.template_params["theme"] = Transliterate.rs(page.page_params.get_param("theme")).upper()
page.template_params["subtheme"] = Transliterate.rs(page.page_params.get_param("subtheme"))
else:
page.template_params["theme"] = page.page_params.get_param("theme").upper()
page.template_params["subtheme"] = page.page_params.get_param("subtheme")
page.template_params["topic"] = page.page_params.get_param("topic")
page.template_params["difficulty"] = int(difficulty) if difficulty else 1
page.template_params["h1"] = page.template_params['year']
page.template_params["h2"] = page.template_params["theme"]
page.template_params["h3"] = page.template_params["topic"]
page.template_params["h4"] = "Test"
page.template_params["exit"] = page.page_params.create_url(
op=PageOperation.MENU_THEME, \
language = PageLanguage.toStr(page.page_params.get_param("language")), \
year=page.page_params.get_param("year"), \
theme = "", \
subtheme = "", \
topic = "", \
difficulty = "", \
period = "", \
beta = True if page.page_params.get_param("beta") else None)
return
@staticmethod
@timer_section("render_summary_page")
def render_summary_page(page):
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "summary.html.j2")
# page.template_params["results"] = []
# page.template_params["correct"] = 0
# page.template_params["incorrect"] = 0
# try:
# if context.c.session.get("history"):
# for r in context.c.session.get("history"):
# # r["q_id"], r["correct"], r["incorrect"]
# page.template_params["results"].append(r)
# page.template_params["correct"] = page.template_params["correct"] + int(r["correct"])
# page.template_params["incorrect"] = page.template_params["incorrect"] + int(r["incorrect"])
# except:
# pass
Design_default._render_result_bar_and_get_last_difficulty(page)
page.template_params["next"] = page.page_params.create_url(
op=PageOperation.MENU_THEME, \
language = PageLanguage.toStr(page.page_params.get_param("language")), \
year=page.page_params.get_param("year"), \
theme = "", \
subtheme = "", \
topic = "", \
difficulty = "", \
period = "", \
beta = True if page.page_params.get_param("beta") else None)
page.page_params.delete_history()
#page.page_params.set_param("q_id", "")
page.page_params.set_param("l_id", "")
page.template_params["h1"] = page.page_params.get_param("year").upper()
page.template_params["h2"] = page.page_params.get_param("theme").upper()
page.template_params["h3"] = page.page_params.get_param("topic").upper()
page.template_params["h4"] = "Rezultat"
return
@staticmethod
@timer_section("render_select_get_browse_started_page")
def render_select_get_browse_started_page(page):
page.page_params.delete_history()
page.page_params.set_param("q_id", "")
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "browse_intro.html.j2")
page.template_params["year"] = page.page_params.get_param("year").title()
page.template_params["theme"] = page.page_params.get_param("theme").title()
page.template_params["subtheme"] = page.page_params.get_param("subtheme").title()
page.template_params["topic"] = page.page_params.get_param("topic").title()
page.template_params["period"] = context.c.session.get("period").title()
page.template_params["difficulty"] = context.c.session.get("difficulty").title()
page.template_params["h1"] = page.template_params['year']
page.template_params["h2"] = page.template_params["theme"]
page.template_params["h3"] = page.template_params["topic"]
page.template_params["h4"] = "Start browse"
test = List(page)
_, url_next = test.get_prev_next_questions_browse_url()
page.template_params["next"] = url_next
page.template_params["back"] = page.page_params.create_url(\
op = PageOperation.MENU_THEME,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
subtheme = "",
topic = "",
period = "",
difficulty = "",
l_id = "",
beta = True if page.page_params.get_param("beta") else None
)
@staticmethod
@timer_section("render_browse_page")
def render_browse_page(page):
test = List(page)
Design_default._store_last_question_into_session(page)
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "browse.html.j2")
q_id = page.page_params.get_param("q_id")
hist = None
context.c.session.set("browse_last_q", q_id)
# DEBUG
# context.c.session.print()
test_id = 0
q = Question(page=page, q_id=q_id)
q.set_from_file_with_exception()
q.eval_with_exception()
all_questions = page.repository.get_content_questions(
PageLanguage.toStr(page.page_params.get_param("language")),
page.page_params.get_param("year"),
page.page_params.get_param("theme"))
if page.page_params.get_param("q_id") in all_questions.keys():
try:
difficulty = int(all_questions[page.page_params.get_param("q_id")]["difficulty"])
            except (KeyError, TypeError, ValueError):
difficulty = 0
else:
difficulty = 0
url_prev, url_next = test.get_prev_next_questions_browse_url()
page.template_params["difficulty"] = difficulty
page.template_params["next"] = url_next
page.template_params["prev"] = url_prev
page.template_params["exit"] = page.page_params.create_url(\
op = PageOperation.MENU_THEME,
language = PageLanguage.toStr(page.page_params.get_param("language")), \
subtheme = "",
topic = "",
period = "",
difficulty = "",
l_id = "",
beta = True if page.page_params.get_param("beta") else None
)
Design_default._render_result_bar_and_get_last_difficulty(page)
page.template_params["debug"] = "DEBUG: {} / {} / {} - {}".format(
page.page_params.get_param("subtheme"),
page.page_params.get_param("topic"),
context.c.session.get("period"),
page.page_params.get_param("q_id")
)
page.template_params["root"] = page.page_params.get_param("root")
page.template_params["q_id"] = page.page_params.get_param("q_id")
page.template_params["l_id"] = page.page_params.get_param("l_id")
page.template_params["language"] = PageLanguage.toStr(page.page_params.get_param("language"))
page.template_params["year"] = page.page_params.get_param("year").upper()
if page.page_params.get_param("language") == PageLanguage.RSC:
page.template_params["theme"] = Transliterate.rs(page.page_params.get_param("theme")).upper()
page.template_params["subtheme"] = Transliterate.rs(page.page_params.get_param("subtheme"))
else:
page.template_params["theme"] = page.page_params.get_param("theme").upper()
page.template_params["subtheme"] = page.page_params.get_param("subtheme")
page.template_params["topic"] = page.page_params.get_param("topic")
page.template_params["h1"] = page.template_params['year']
page.template_params["h2"] = page.template_params["theme"]
page.template_params["h3"] = page.template_params["topic"]
page.template_params["h4"] = "Browse"
return
@staticmethod
def _render_user_one_cat_rec(page, cat, desc, indent):
hspace = "<div style='display:inline-block;padding-left:6px;padding-right:6px;'> </div>"
page.add_lines(" <tr>")
if indent > 1:
page.add_lines("<td></td> ")
page.add_lines("<td style='text-align:left'>{}{}{}</td> ".format(hspace, desc, hspace))
if indent <= 1:
page.add_lines("<td></td> ")
if "all" in cat.keys():
page.add_lines("<td style='text-align:center'>{}{}/{}({:3d}%/{:3d})%{}</td> ".format(
hspace,
cat["all"]["total"],
cat["all"]["subtotal"],
int(cat["all"]["questions"]*100),
int(cat["all"]["subquestions"]*100),
hspace
))
else:
page.add_lines("<td></td> ")
for d in range(1,4):
diff = str(d)
if "difficulty" in cat.keys() and diff in cat["difficulty"].keys():
page.add_lines("<td style='text-align:center'>{}{}/{}({:3d}%/{:3d}%){}</td> ".format(
hspace,
cat["difficulty"][diff]["total"],
cat["difficulty"][diff]["subtotal"],
int(cat["difficulty"][diff]["questions"]*100),
int(cat["difficulty"][diff]["subquestions"]*100),
hspace
))
else:
page.add_lines("<td></td> ")
page.add_lines("</tr>\n")
for kt in cat.keys():
if not (kt == "all" or kt == "difficulty"):
for k in cat[kt].keys():
Design_default._render_user_one_cat_rec(page, cat[kt][k], k, indent+1)
@staticmethod
@timer_section("render_user_stats")
def render_user_stats(page, u_id):
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["h1"] = "Rezultat"
#prepare_user_stats_chart(page, 'Petar')
prepare_user_stats_chart(page, u_id)
@staticmethod
@timer_section("render_about")
def render_about(page):
# Create dictionary entries that define menu
Design_default.add_menu(page)
page.template_params["template_name"] = Design_default._add_language(page, "about.html.j2")
page.template_params["h1"] = "<NAME>"
|
StarcoderdataPython
|
28501
|
<filename>users.py<gh_stars>0
import json, base64
import logging, coloredlogs
import hashlib, copy
from flask_table import Table, Col
logger = logging.getLogger(__name__)
coloredlogs.install(level='INFO')
class Users:
def __init__(self):
self.__users = self.__load_users("json/users.json")
self.__generate_hash_keys()
def __load_json(self, filename):
with open(filename) as f:
return json.load(f)
    def __load_users(self, config):
        try:
            config = self.__load_json(config)
        except (OSError, json.JSONDecodeError):
            logger.critical("Could not find configuration file: " + config)
            config = {"users": []}  # fall back to an empty user list so later lookups do not fail
        return config
def __generate_hash_keys(self):
for user in self.__users.get("users"):
hashed = hashlib.sha256(user.get("api_key").encode())
user["hashed_api_key"] = hashed.hexdigest()
def get_key(self, username):
for user in self.__users.get("users"):
if user.get("username") == username:
return user.get("api_key")
return ""
def get_user(self, key):
for user in self.__users.get("users"):
if user.get("api_key") == key:
return user.get("username")
return ""
def get_hashed_key(self, username):
for user in self.__users.get("users"):
if user.get("username") == username:
return user.get("hashed_api_key")
return ""
def authenticate(self, key):
for user in self.__users.get("users"):
if user.get("api_key") == key:
return True
return False
def get_level(self, key):
# Try to match api_key
for user in self.__users.get("users"):
if user.get("api_key") == key:
return user.get("level")
# Try to match username (fallback - is safe as already authed) --- no it's not
# for user in self.__users.get("users"):
# if user.get("username") == key:
# return user.get("level")
return 0
def get_users(self):
users = copy.deepcopy(self.__users)
for user in users.get("users"):
del user["api_key"]
return users
def get_user_table(self):
items = []
for user in self.get_users().get("users"):
items.append(Item(user.get("username"), user.get("level"), user.get("hashed_api_key")))
table = ItemTable(items)
return table.__html__()
# Declare your table
class ItemTable(Table):
classes = ['table table-dark']
name = Col('Username')
level = Col('Level')
hashed_pass = Col('Hashed API Key')
# Get some objects
class Item(object):
def __init__(self, name, level, hashed_pass):
self.name = name
self.level = level
        self.hashed_pass = hashed_pass
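# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assumes json/users.json exists and is shaped like:
#   {"users": [{"username": "alice", "api_key": "secret", "level": 2}]}
if __name__ == "__main__":
    users = Users()
    key = users.get_key("alice")      # "" when the username is unknown
    print(users.authenticate(key))    # True only for a known api_key
    print(users.get_level(key))       # 0 when the key does not match any user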
|
StarcoderdataPython
|
8042361
|
from pettingzoo.utils.observation_saver import save_observation
import gym
import numpy as np
def check_save_obs(env):
for agent in env.agents:
assert isinstance(env.observation_spaces[agent], gym.spaces.Box), "Observations must be Box to save observations as image"
assert np.all(np.equal(env.observation_spaces[agent].low, 0)) and np.all(np.equal(env.observation_spaces[agent].high, 255)), "Observations must be 0 to 255 to save as image"
assert len(env.observation_spaces[agent].shape) == 3 or len(env.observation_spaces[agent].shape) == 2, "Observations must be 2D or 3D to save as image"
if len(env.observation_spaces[agent].shape) == 3:
assert env.observation_spaces[agent].shape[2] == 1 or env.observation_spaces[agent].shape[2] == 3, "3D observations can only have 1 or 3 channels to save as an image"
def test_save_obs(env):
env.reset()
try:
check_save_obs(env)
for agent in env.agent_iter(env.num_agents):
save_observation(env=env, agent=agent, save_dir="saved_observations")
except AssertionError as ae:
print("did not save the observations: ", ae)
|
StarcoderdataPython
|
5196734
|
from django.urls import re_path
from playlist import consumers
websocket_urlpatterns = [
re_path(r"^ws/playlist/device/$", consumers.PlaylistDeviceConsumer.as_asgi())
]
|
StarcoderdataPython
|
9762607
|
<reponame>its-dirg/saml-metadata-upload
import os
from io import BytesIO
import pytest
from flask_transfer.exc import UploadError
from metadata_upload.validation import SAMLMetadataValidator
class TestSAMLMetadataValidator():
@pytest.fixture(autouse=True)
def create_validator(self):
self.validator = SAMLMetadataValidator()
def test_accepts_valid_SAML_metadata(self):
xml_path = os.path.join(os.path.dirname(__file__), 'idp.xml')
with open(xml_path, 'rb') as xmldata:
assert self.validator(xmldata)
def test_rejects_invalid_xml(self):
non_saml_xml = """<?xml version='1.0' encoding='UTF-8'?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>"""
xmldata = BytesIO(non_saml_xml.encode('utf-8'))
with pytest.raises(UploadError):
self.validator(xmldata)
|
StarcoderdataPython
|
8166755
|
# coding=utf-8
"""Provide functionalities for managing I/O operations."""
from typing import Callable, List
def generic_vertex_ordering(
i: int,
N: int,
j: Callable = lambda i, N: i + N,
inc_i: int = 1,
inc_j: int = 1,
invert: bool = True,
) -> List:
"""Define the vertices order for each face of the STL solid.
Faces in STL files consist of three vertices, i.e., triangles. So,
considering two consecutive blade sections, we can imagine the
following.
(i + 2*inc_i) (i + inc_i) i
ith section -> ────O────────────O────────────O────
. . . . .
. . . . .
. . . . .
. . . . .
. . . . .
.. .. .
jth section -> ────O────────────O────────────O────
(j + 2*inc_y) (j + inc_y) j
This abstraction works well for blades with consistent, uniform
sections; i.e., sections should have the same number of points.
By default, we use clockwise ordering.
Parameters
----------
i
the vertex index on the current section.
N
an offset value corresponding to the same vertex i on a next
section.
j
a function describing the relationship between i and N.
inc_i
the increment for the next vertex in the ith section.
inc_j
the increment for the next vertex in the jth section.
invert
invert the vertices order: make them counterclockwise.
"""
return (
[[i, j(i, N), j(i, N) + inc_j], [i, j(i, N) + inc_j, i + inc_i]]
if invert
else [[i, j(i, N) + inc_j, j(i, N)], [i, i + inc_i, j(i, N) + inc_j]]
)
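# A small usage check, added for illustration (not in the original module).
if __name__ == "__main__":
    # With the default arguments, vertex 0 of the current section is stitched
    # to vertices 4 and 5 of the next section (N = 4 points per section).
    faces = generic_vertex_ordering(i=0, N=4)
    print(faces)  # expected: [[0, 4, 5], [0, 5, 1]]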
|
StarcoderdataPython
|
278743
|
"""Python API that wraps GeoIP country database lookup into a simple function.
Download the latest MaxMind GeoIP country database and read other docs here:
http://www.maxmind.com/app/geolitecountry
Copyright (C) 2009 <NAME>, released under the Lesser General Public License:
http://www.gnu.org/licenses/lgpl.txt
Usage examples:
>>> country('172.16.58.3')
'US'
>>> country('192.168.3.11')
'NZ'
>>> country('asdf')
''
>>> country('127.0.0.1')
''
"""
from lib.settings import ROOTDIR
# List of country codes (indexed by GeoIP country ID number)
countries = (
'', 'AP', 'EU', 'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ',
'AR', 'AS', 'AT', 'AU', 'AW', 'AZ', 'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH',
'BI', 'BJ', 'BM', 'BN', 'BO', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA',
'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU',
'CV', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', 'DO', 'DZ', 'EC', 'EE', 'EG',
'EH', 'ER', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FM', 'FO', 'FR', 'FX', 'GA', 'GB',
'GD', 'GE', 'GF', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT',
'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IN',
'IO', 'IQ', 'IR', 'IS', 'IT', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM',
'KN', 'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS',
'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN',
'MO', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA',
'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA',
'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY',
'QA', 'RE', 'RO', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI',
'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV', 'SY', 'SZ', 'TC', 'TD',
'TF', 'TG', 'TH', 'TJ', 'TK', 'TM', 'TN', 'TO', 'TL', 'TR', 'TT', 'TV', 'TW',
'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', 'VN',
'VU', 'WF', 'WS', 'YE', 'YT', 'RS', 'ZA', 'ZM', 'ME', 'ZW', 'A1', 'A2', 'O1',
'AX', 'GG', 'IM', 'JE', 'BL', 'MF')
def iptonum(ip):
"""Convert IP address string to 32-bit integer, or return None if IP is bad.
>>> iptonum('0.0.0.0')
0
>>> hex(iptonum('127.0.0.1'))
'0x7f000001'
>>> hex(iptonum('255.255.255.255'))
'0xffffffffL'
>>> iptonum('127.0.0.256')
>>> iptonum('1.2.3')
>>> iptonum('a.s.d.f')
>>> iptonum('1.2.3.-4')
>>> iptonum('')
"""
segments = ip.split('.')
if len(segments) != 4:
return None
num = 0
for segment in segments:
try:
segment = int(segment)
except ValueError:
return None
if segment < 0 or segment > 255:
return None
num = num << 8 | segment
return num
class DatabaseError(Exception):
pass
class GeoIP(object):
"""Wraps GeoIP country database lookup into a class."""
_record_length = 3
_country_start = 16776960
def __init__(self, dbname):
"""Init GeoIP instance with given GeoIP country database file."""
self._dbfile = open(dbname, 'rb')
def country(self, ip):
"""Lookup IP address string and turn it into a two-letter country code
like 'NZ', or return empty string if unknown.
>>> g = GeoIP()
>>> g.country('172.16.58.3')
'US'
>>> g.country('192.168.3.11')
'NZ'
>>> g.country('asdf')
''
>>> g.country('127.0.0.1')
''
"""
ipnum = iptonum(ip)
if ipnum is None:
return ''
return countries[self._country_id(ipnum)]
def _country_id(self, ipnum):
"""Look up and return country ID of given 32-bit IP address."""
# Search algorithm from: http://code.google.com/p/pygeoip/
offset = 0
for depth in range(31, -1, -1):
self._dbfile.seek(offset * 2 * self._record_length)
data = self._dbfile.read(2 * self._record_length)
x = [0, 0]
for i in range(2):
for j in range(self._record_length):
x[i] += ord(data[self._record_length * i + j]) << (j * 8)
i = 1 if ipnum & (1 << depth) else 0
if x[i] >= self._country_start:
return x[i] - self._country_start
offset = x[i]
raise DatabaseError('GeoIP database corrupt: offset=%s' % offset)
def country(ip, dbname):
"""Helper function that creates a GeoIP instance and calls country()."""
return GeoIP(dbname).country(ip)
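# Hedged usage sketch (added; not in the original module). The database path
# below is hypothetical -- point it at a downloaded GeoLiteCountry file.
#
# from os.path import join
# dbname = join(ROOTDIR, 'GeoIP.dat')  # hypothetical location
# print country('8.8.8.8', dbname)     # two-letter code, or '' if unknown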
if __name__ == '__main__':
import doctest
doctest.testmod()
|
StarcoderdataPython
|
9749865
|
<gh_stars>0
from datetime import datetime, timedelta
from timely_beliefs.beliefs.utils import load_time_series
from scipy.special import erfinv
from bokeh.palettes import viridis
from bokeh.io import show
from bokeh.models import ColumnDataSource, FixedTicker, FuncTickFormatter, LinearAxis
from bokeh.plotting import figure
import time, pickle
import datetime
import pytz
import isodate
import pandas as pd
import timely_beliefs as tb
import numpy as np
import scipy.stats as stats
# code to read in the csv file:
def read_beliefs_from_csv(sensor, source, cp, event_resolution: timedelta, tz_hour_difference: float = 0) -> list:
"""
Returns a timely_beliefs DataFrame read from a csv file
@param sensor : beliefsensor
@param source : BeliefSource
@param cp : float, cumulative probability
@param event_resolution : timedelta object, event resolution
@param tz_hour_difference : float,time difference
"""
sensor_descriptions = (("Temperature", "°C"),)
cols = [0, 1] # Columns with datetime index and observed values
horizons = list(range(0, 169, 1))
cols.extend([h + 2 for h in horizons])
n_horizons = 169
n_events = None
beliefs = pd.read_csv("%s-%s-%s.csv" % (sensor.name.replace(' ', '_').lower(), source.name.replace(' ', '_').lower(), cp),
index_col=0, parse_dates=[0], date_parser=lambda col: pd.to_datetime(col, utc=True) - timedelta(hours=tz_hour_difference),
nrows=n_events, usecols=cols)
beliefs = beliefs.resample(event_resolution).mean()
assert beliefs.index.tzinfo == pytz.utc
# Construct the BeliefsDataFrame by looping over the belief horizons
blfs = load_time_series(beliefs.iloc[:, 0].head(n_events), sensor=sensor, source=source,
belief_horizon=timedelta(hours=0), cumulative_probability=0.5) # load the observations (keep cp=0.5)
for h in beliefs.iloc[:, 1:n_horizons + 1]:
try:
blfs += load_time_series(beliefs[h].head(n_events), sensor=sensor, source=source,
belief_horizon=(isodate.parse_duration(
"PT%s" % h)) + event_resolution, cumulative_probability=cp) # load the forecasts
except isodate.isoerror.ISO8601Error: # In case of old headers that don't yet follow the ISO 8601 standard
blfs += load_time_series(beliefs[h].head(n_events), sensor=sensor, source=source,
belief_horizon=(isodate.parse_duration(
"%s" % h)) + event_resolution, cumulative_probability=cp) # load the forecasts
return blfs
def make_df(n_events = 100, n_horizons = 169, tz_hour_difference=-9, event_resolution=timedelta(hours=1)):
"""
Returns DataFrame in which n events and n horizons are stored
@param n_events: int,number of events in DataFrame
@param n_horizons: int,number of horizons in DataFrame
@param tz_hour_difference: float,time difference
@param event_resolution: timedelta object,event resolution
"""
sensor_descriptions = (("Temperature", "°C"),)
source = tb.BeliefSource(name="Random forest")
sensors = (tb.Sensor(name=descr[0], unit=descr[1], event_resolution=event_resolution) for descr in sensor_descriptions)
blfs=[]
for sensor in sensors:
blfs += read_beliefs_from_csv(sensor, source=source, cp=0.05, event_resolution=event_resolution, tz_hour_difference=tz_hour_difference)
blfs += read_beliefs_from_csv(sensor, source=source, cp=0.5, event_resolution=event_resolution, tz_hour_difference=tz_hour_difference)
blfs += read_beliefs_from_csv(sensor, source=source, cp=0.95, event_resolution=event_resolution, tz_hour_difference=tz_hour_difference)
bdf = tb.BeliefsDataFrame(sensor=sensor, beliefs=blfs).sort_index()
return bdf
# end of code to read in csv file
def create_cp_data(df, start, end, start_time, fixedviewpoint):
"""
Returns 3 lists with values of 2 different cumulative probabilities.
Solely 1 out of 3 is 0.5.
@param df: BeliefsDataFrame, containing events, belief times, predictions and their cumulative probabilities of 0.05/0.5/0.95
@param start: int,start of timedelta in hours
@param end: int,end of timedelta in hours
@param start_time: datetime object, start of event
@param fixedviewpoint: BOOLEAN,if true plot based on future predictions
"""
first_date = df.iloc[0].name[0]
last_date = df.iloc[-1].name[0]
# check if current time is in data frame
if start_time < first_date or start_time > last_date :
raise ValueError('Your start time is not in the dataframe')
# get cp for fixed viewpoint or not
if fixedviewpoint == True:
df = df[df.index.get_level_values("event_start") >= start_time]
bdf = df.fixed_viewpoint(start_time)
end = len(bdf)
cp0 = bdf.iloc[0].name[3]
cp1 = bdf.iloc[1].name[3]
cp2 = bdf.iloc[2].name[3]
else:
bdf = df.belief_history(event_start=start_time, belief_horizon_window=(timedelta(hours=start), timedelta(hours=end)))
cp0 = bdf.iloc[0].name[2]
cp1 = bdf.iloc[1].name[2]
cp2 = bdf.iloc[2].name[2]
# list of cps
cp_list = [cp0, cp1, cp2]
list_0 = []
list_1 = []
list_2 = []
# make list for each cumulative probability; has to include 0.5
if 0.5 in cp_list:
i = 0
index = cp_list.index(0.5)
for _, value in bdf.iterrows():
i = i%3
if i == 0:
list_0 += [value[0]]
elif i == 1:
list_1 += [value[0]]
elif i == 2:
list_2 += [value[0]]
i += 1
cp_list.remove(0.5)
lists = [list_0, list_1, list_2]
mean_list = lists.pop(index)
return (cp_list, mean_list, lists[0], lists[1])
raise ValueError("No mean cp value")
def ridgeline_plot(start_time, df, start=0, end=168, fixedviewpoint = False):
"""
Creates ridgeline plot by selecting a belief history about a specific event
@param start_time : datetime string of selected event
@param df : timely_beliefs DataFrame
@param start : start of hours before event time
@param end : end of hours before event time
@param fixedviewpoint : if true create fixed viewpoint plot
"""
# set parameters
if fixedviewpoint == True:
start = 0
# out of bounds checks
if end < 0 or end > 168:
raise ValueError("End of the forecast horizon must be between 0 and 168 hours.")
if start < 0 or start > end:
raise ValueError("Start of the forecast horizon must be between 0 and 168 hours.")
#to include last observation
end += 1
#get cps
cp_list, pred_temp_05, pred_temp_0, pred_temp_2 = create_cp_data(df,start,end,start_time,fixedviewpoint)
# make means and standard deviation
mean = np.array([float(i) for i in pred_temp_05])
std1 = np.array([(float(pred_temp_0[i])-float(pred_temp_05[i]))/(np.sqrt(2)*erfinv((2*cp_list[0])-1)) for i in range(len(pred_temp_05))])
std2 = np.array([(float(pred_temp_2[i])-float(pred_temp_05[i]))/(np.sqrt(2)*erfinv((2*cp_list[1])-1)) for i in range(len(pred_temp_05))])
std = (std1+std2)/2
#plot everything
show_plot(mean, std, start, end, fixedviewpoint)
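# Note on the conversion above (added for clarity): for a normal distribution,
# the quantile at cumulative probability cp lies z = sqrt(2) * erfinv(2*cp - 1)
# standard deviations from the median. For cp = 0.95 that z is about 1.645, so
# the spread is recovered as std ~= (q95 - median) / 1.645; cp = 0.05 gives a
# z of the same magnitude but negative sign, and the two estimates are averaged.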
def show_plot(mean, std, start, end, fixedviewpoint=False):
"""
Creates and shows ridgeline plot
@param mean: list of float, mean values
@param std: list of float, standard deviation values
@param start: int,start hours before event-time
@param end: int,end hours before event-time
@param fixedviewpoint : BOOLEAN, if true create fixed viewpoint plot
"""
# amount of lines to draw
nr_lines = len(mean)
x = np.linspace(-10, 30, 500)
frame = pd.DataFrame()
# generate points for each line
for i in range(nr_lines):
frame["{}".format(i)] = stats.norm.pdf(x, mean[i], std[i])
# set the color palette to viridis
pallete = viridis(nr_lines)
# set list reversed or not depending on viewpoint
if fixedviewpoint:
cats = list(frame.keys())
else:
cats = list(reversed(frame.keys()))
source = ColumnDataSource(data=dict(x=x))
# create figure
p = figure(y_range=cats, plot_width=900, x_range=(-5, 30), toolbar_location=None)
# create a ridgeline in the figure
for i, cat in enumerate(reversed(cats)):
y = ridge(cat, frame[cat], 50)
source.add(y, cat)
p.patch('x', cat, alpha=0.6, color=pallete[i], line_color="black", source=source)
# add a y axis with the right ticks and labels, depending on fixedviewpoint
if fixedviewpoint:
p.yaxis.axis_label = 'Upcoming hours'
y_ticks = list(np.arange(0, nr_lines, 6))
yaxis = LinearAxis(ticker=y_ticks)
y_labels = list((np.arange(0, nr_lines, 6)))
else:
p.yaxis.axis_label = 'Previous hours'
y_ticks = list(np.arange(0, end, 6))[::-1]
yaxis = LinearAxis(ticker=y_ticks)
y_labels = list(np.arange(start, end, 6))
# map ticks on to a dict
mapping_dict = {y_ticks[i]: str(y_labels[i]) for i in range(len(y_labels))}
for i in range(nr_lines):
if i not in mapping_dict:
mapping_dict[i]=" "
mapping_code = "var mapping = {};\n return mapping[tick];\n ".format(mapping_dict)
#format ticks
p.yaxis.formatter = FuncTickFormatter(code=mapping_code)
# set plot attributes
p.outline_line_color = None
p.background_fill_color = "#ffffff"
p.xaxis.ticker = FixedTicker(ticks=list(range(-20, 101, 10)))
p.xaxis.axis_label = 'Temperature (°C)'
p.ygrid.grid_line_color = None
p.xgrid.grid_line_color = "#000000"
p.xgrid.ticker = p.xaxis[0].ticker
p.axis.minor_tick_line_color = None
p.axis.major_tick_line_color = None
p.axis.axis_line_color = None
# add padding in the y direction
p.y_range.range_padding = 0.2 / (nr_lines / 168)
show(p)
# creates a list for the ridgeline function as input
def ridge(category, data, scale=100):
return list(zip([category] * len(data), scale * data))
# tests
df = make_df()
ridgeline_plot(datetime.datetime(2015, 3, 1, 9, 0, tzinfo=pytz.utc), df, end=150, fixedviewpoint=False)
ridgeline_plot(datetime.datetime(2015, 3, 1, 9, 0, tzinfo=pytz.utc), df, fixedviewpoint=True)
|
StarcoderdataPython
|
1901435
|
num = 10
num2 = 20
# 明天放假 ("holiday tomorrow")
|
StarcoderdataPython
|
4802592
|
<filename>pyspedas/omni/tests/tests.py<gh_stars>10-100
import os
import unittest
import pandas as pd
from pyspedas.utilities.data_exists import data_exists
import pyspedas
class LoadTestCases(unittest.TestCase):
def test_utc_timestamp_regression(self):
varname = 'BX_GSE'
data_omni = pyspedas.omni.data(trange=['2010-01-01/00:00:00', '2010-01-02/00:00:00'],notplot=True,varformat=varname,time_clip=True)
date_time = pd.to_datetime(data_omni[varname]['x'],unit='s')
self.assertTrue(str(date_time[0]) == '2010-01-01 00:00:00')
def test_load_hro2_data(self):
omni_vars = pyspedas.omni.data()
self.assertTrue(data_exists('BX_GSE'))
self.assertTrue(data_exists('BY_GSE'))
self.assertTrue(data_exists('BZ_GSE'))
self.assertTrue(data_exists('BY_GSM'))
self.assertTrue(data_exists('BZ_GSM'))
self.assertTrue(data_exists('proton_density'))
def test_load_hro_data(self):
omni_vars = pyspedas.omni.data(level='hro')
self.assertTrue(data_exists('BX_GSE'))
self.assertTrue(data_exists('BY_GSE'))
self.assertTrue(data_exists('BZ_GSE'))
self.assertTrue(data_exists('BY_GSM'))
self.assertTrue(data_exists('BZ_GSM'))
self.assertTrue(data_exists('proton_density'))
def test_load_hro_5min_data(self):
omni_vars = pyspedas.omni.data(level='hro', datatype='5min')
self.assertTrue(data_exists('BX_GSE'))
self.assertTrue(data_exists('BY_GSE'))
self.assertTrue(data_exists('BZ_GSE'))
self.assertTrue(data_exists('BY_GSM'))
self.assertTrue(data_exists('BZ_GSM'))
self.assertTrue(data_exists('proton_density'))
def test_downloadonly(self):
files = pyspedas.omni.data(downloadonly=True, trange=['2014-2-15', '2014-2-16'])
self.assertTrue(os.path.exists(files[0]))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3262805
|
<reponame>bqmoreland/EASwift
import sys
import tkinter as tk
from tkinter import ttk
import math
import time
import random
from tkinter import messagebox
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
ord_a = ord("A")
infinity = 1000000
def beep():
print("\a", end="")
sys.stdout.flush()
def number_to_letters(number):
""" Return a number converted into letters as in A, B, C, ..., AA, AB, AC, ..., BA, BB, BC, ..."""
result = ""
while True:
letter_num = number % 26
number //= 26
ch = chr(ord_a + letter_num)
result = ch + result
if number <= 0:
break
return result
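# Illustration (added): a few values produced by number_to_letters as written,
# e.g. number_to_letters(0) == 'A', number_to_letters(25) == 'Z',
# number_to_letters(28) == 'BC'.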
def array_to_string(array, pad, as_letter):
""" Return a string representation of a two-dimensional array."""
txt = ""
num_rows = len(array)
num_cols = len(array[0])
for r in range(num_rows):
for c in range(num_cols):
value = ""
if ((array[r][c] == infinity) or (array[r][c] < 0)):
value = "-"
elif as_letter:
value = number_to_letters(array[r][c])
else:
value = f"{array[r][c]}"
txt += value.rjust(pad)
txt += "\n"
return txt
class Modes:
""" What the user is doing on the PictureBox."""
none = 0
add_node = 1
add_link1 = 2
add_link2 = 3
df_traversal = 4
bf_traversal = 5
spanning_tree = 6
minimal_spanning_tree = 7
any_path = 8
label_setting_tree = 9
label_setting_path = 10
label_correcting_tree = 11
label_correcting_path = 12
connected_components = 13
all_pairs = 14
def draw_arrowhead(canvas, color, head, nx, ny, length):
""" Draw an arrowhead at the given point in the normalized direction <nx, ny>."""
ax = length * (-ny - nx)
ay = length * (nx - ny)
points = \
[
head[0] + ax, head[1] + ay,
head[0], head[1],
head[0] - ay, head[1] + ax
]
canvas.create_polygon(points, fill=color)
class Node:
radius = 11
radius_squared = radius * radius
def __init__(self, name, location, index):
self.name = name
self.text = name
self.location = location
self.index = index
self.links = []
# Properties used by algorithms.
self.visited = False
# The node and link before this one in a spanning tree or path.
self.from_node = None
self.from_link = None
# The distance from the root node to this node.
self.distance = -1
def __str__(self):
""" Return the node's current text."""
return self.name
def draw(self, canvas, show_text, fill_color, outline_color, text_color):
""" Draw the node."""
# Fill and outline the node.
x0 = self.location[0] - Node.radius
y0 = self.location[1] - Node.radius
x1 = self.location[0] + Node.radius
y1 = self.location[1] + Node.radius
canvas.create_oval(x0, y0, x1, y1, fill=fill_color, outline=outline_color)
# Draw the node's current text.
if ((show_text) and (self.text != None)):
canvas.create_text(self.location[0], self.location[1], text=self.text, fill=text_color)
else:
canvas.create_text(self.location[0], self.location[1], text=self.name, fill=text_color)
def is_at(self, location):
""" Return True if the node is at the indicated location."""
dx = self.location[0] - location[0]
dy = self.location[1] - location[1]
return dx * dx + dy * dy <= Node.radius_squared
def add_link_to(self, node):
    """ Add a link to the indicated node and record it in this node's link list."""
    self.links.append(Link(self, node))
class Link:
def __init__(self, node0, node1):
self.node0 = node0
self.node1 = node1
self.visited = False
dx = node0.location[0] - node1.location[0]
dy = node0.location[1] - node1.location[1]
self.cost = int(math.sqrt(dx * dx + dy * dy))
self.capacity = random.randint(1, 4) + random.randint(0, 2)
def __str__(self):
""" Return the names of the nodes."""
return f"{self.node0.name} --> {self.node1.name}"
def draw(self, canvas, show_costs, show_capacities, link_width, line_color, text_color):
""" Draw the link."""
dx = self.node1.location[0] - self.node0.location[0]
dy = self.node1.location[1] - self.node0.location[1]
dist = math.sqrt(dx * dx + dy * dy)
nx = dx / dist
ny = dy / dist
head = (
self.node1.location[0] - Node.radius * nx,
self.node1.location[1] - Node.radius * ny)
# Draw the link.
canvas.create_line(
self.node0.location[0],
self.node0.location[1],
self.node1.location[0],
self.node1.location[1],
fill=line_color, width=link_width)
draw_arrowhead(canvas, line_color, head, nx, ny, Node.radius / 2)
# Draw the link's cost.
if show_costs:
dx *= 3 * Node.radius / dist
dy *= 3 * Node.radius / dist
x = self.node0.location[0] + dx
y = self.node0.location[1] + dy
canvas.create_text(x, y, text=f"{self.cost}")
# Draw the link's capacity.
if show_capacities:
dx *= 3 * Node.radius / dist
dy *= 3 * Node.radius / dist
x = self.node0.location[0] + dx
y = self.node0.location[1] + dy
canvas.create_text(x, y, text=f"{self.capacity}")
class Network:
def __init__(self):
# A list holding all nodes.
self.all_nodes = []
def save_network(self, filename):
""" Save the network into the file."""
with open(filename, "w") as output:
# Save the number of nodes.
output.write(f"{len(self.all_nodes)}" + "\n")
# Renumber the nodes.
for i in range(len(self.all_nodes)):
self.all_nodes[i].index = i
# Save the node information.
for node in self.all_nodes:
# Save this node's information.
output.write(f"{node.name},{node.location[0]},{node.location[1]}")
# Save information about this node's links.
for link in node.links:
other_node = link.node0
if (link.node0 == node):
other_node = link.node1
output.write(f",{other_node.index},{link.cost},{link.capacity}")
output.write("\n")
@staticmethod
def load_network(filename):
""" Create a network from a network file."""
# Make a new network.
network = Network()
# Read the data.
with open(filename, "r") as input:
all_text = input.read()
all_lines = all_text.split("\n")
# Get the number of nodes.
num_nodes = int(all_lines[0])
# Create the nodes.
for i in range(num_nodes):
network.all_nodes.append(Node("*", (-1, -1), i))
# Read the nodes.
for i in range(1, num_nodes + 1):
node = network.all_nodes[i - 1]
node_fields = all_lines[i].split(",")
# Get the node's text and coordinates.
name = node_fields[0]
location = (
int(node_fields[1]),
int(node_fields[2])
)
node.name = name
node.text = name
node.location = location
# Get the node's links.
for j in range(3, len(node_fields), 3):
# Get the next link.
index = int(node_fields[j])
link = Link(node, network.all_nodes[index])
link.cost = int(node_fields[j + 1])
link.capacity = int(node_fields[j + 2])
node.links.append(link)
return network
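# Added note on the file layout shared by save_network and load_network:
# the first line holds the node count, and each following line is
#     name,x,y[,other_index,cost,capacity]...
# with one comma-separated triple per outgoing link. A two-node example
# (values are illustrative only):
#     2
#     A,50,60,1,25,3
#     B,120,60,0,25,2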
def draw(self, canvas, show_node_text, show_costs, show_capacities,
link_width, link_color, link_text_color, link_width2, link_color2, link_text_color2,
node_fill_color, node_outline_color, node_text_color,
node_fill_color2, node_outline_color2, node_text_color2):
""" Draw the network."""
# Draw the non-highlighted links.
for node in self.all_nodes:
for link in node.links:
if not link.visited:
link.draw(canvas, show_costs, show_capacities, link_width, link_color, link_text_color2)
# Draw the highlighted links.
for node in self.all_nodes:
for link in node.links:
if link.visited:
link.draw(canvas, show_costs, show_capacities, link_width2, link_color2, link_text_color2)
# Draw the non-highlighted nodes.
for node in self.all_nodes:
if not node.visited:
node.draw(canvas, show_node_text, node_fill_color, node_outline_color, node_text_color)
# Draw the highlighted nodes.
for node in self.all_nodes:
if node.visited:
node.draw(canvas, show_node_text, node_fill_color2, node_outline_color2, node_text_color2)
def find_node(self, location):
""" Find the node at the given position."""
for node in self.all_nodes:
if node.is_at(location):
return node
return None
def make_link(self, node0, node1):
    """ Make a link from node0 to node1."""
    node0.links.append(Link(node0, node1))
def make_links(self, node0, node1):
    """ Make links from node0 to node1 and node1 to node0."""
    node0.links.append(Link(node0, node1))
    node1.links.append(Link(node1, node0))
def reset_network(self):
""" Reset the network."""
# Deselect all nodes and branches.
self.deselect_nodes()
self.deselect_links()
# Clear the nodes' text properties.
for node in self.all_nodes:
node.from_link = None
node.from_node = None
node.text = None
def deselect_nodes(self):
""" Deselect all nodes."""
for node in self.all_nodes:
node.visited = False
def deselect_links(self):
""" Deselect all links."""
for node in self.all_nodes:
for link in node.links:
link.visited = False
def depth_first_traverse(self, start_node):
""" Traverse the network in depth-first order."""
# Reset the network.
self.reset_network()
# Keep track of the number of nodes in the traversal.
num_done = 0
# Push the start node onto the stack.
stack = []
stack.append(start_node)
# Visit the start node.
traversal = []
traversal.append(start_node)
start_node.visited = True
start_node.text = f"{num_done}"
num_done += 1
# Process the stack until it's empty.
while len(stack) > 0:
# Get the next node from the stack.
node = stack.pop()
# Process the node's links.
for link in node.links:
to_node = link.node1
# Only use the link if the destination
# node hasn't been visited.
if not to_node.visited:
# Mark the node as visited.
to_node.visited = True
to_node.text = f"{num_done}"
num_done += 1
# Add the node to the traversal.
traversal.append(to_node)
# Add the link to the traversal.
link.visited = True
# Push the node onto the stack.
stack.append(to_node)
# See if the network is connected.
is_connected = True
for node in self.all_nodes:
if not node.visited:
is_connected = False
break
return traversal, is_connected
def breadth_first_traverse(self, start_node):
""" Traverse the network in breadth-first order."""
# Reset the network.
self.reset_network()
# Keep track of the number of nodes in the traversal.
num_done = 0
# Add the start node to the queue.
queue = []
queue.insert(0, start_node)
# Visit the start node.
traversal = []
traversal.append(start_node)
start_node.visited = True
start_node.text = f"{num_done}"
num_done += 1
# Process the queue until it's empty.
while len(queue) > 0:
# Get the next node from the queue.
node = queue.pop()
# Process the node's links.
for link in node.links:
to_node = link.node1
# Only use the link if the destination
# node hasn't been visited.
if not to_node.visited:
# Mark the node as visited.
to_node.visited = True
to_node.text = f"{num_done}"
num_done += 1
# Add the node to the traversal.
traversal.append(to_node)
# Add the link to the traversal.
link.visited = True
# Add the node onto the queue.
queue.insert(0, to_node)
# See if the network is connected.
is_connected = True
for node in self.all_nodes:
if not node.visited:
is_connected = False
break
return traversal, is_connected
def get_connected_components(self):
""" Return the network's connected components."""
# Reset the network.
self.reset_network()
# Keep track of the number of nodes visited.
num_visited = 0
# Make the result list of lists.
components = []
# Repeat until all nodes are in a connected component.
while num_visited < len(self.all_nodes):
# Find a node that hasn't been visited.
start_node = None
for node in self.all_nodes:
if not node.visited:
start_node = node
break
# Make sure we found one.
assert start_node != None
# Add the start node to the stack.
stack = []
stack.append(start_node)
start_node.visited = True
num_visited += 1
# Add the node to a new connected component.
component = []
components.append(component)
component.append(start_node)
# Process the stack until it's empty.
while len(stack) > 0:
# Get the next node from the stack.
node = stack.pop()
# Process the node's links.
for link in node.links:
# Only use the link if the destination
# node hasn't been visited.
to_node = link.node1
if not to_node.visited:
# Mark the node as visited.
to_node.visited = True
# Mark the link as part of the tree.
link.visited = True
num_visited += 1
# Add the node to the current connected component.
component.append(to_node)
# Push the node onto the stack.
stack.append(to_node)
# Return the components.
return components
def make_spanning_tree(self, root):
""" Build a spanning tree. Return its total cost and whether it is connected."""
# Reset the network.
self.reset_network()
# The total cost of the links in the spanning tree.
total_cost = 0
# Push the root node onto the stack.
stack = []
stack.append(root)
# Visit the root node.
root.visited = True
# Process the stack until it's empty.
while len(stack) > 0:
# Get the next node from the stack.
node = stack.pop()
# Process the node's links.
for link in node.links:
# Only use the link if the destination
# node hasn't been visited.
to_node = link.node1
if not to_node.visited:
# Mark the node as visited.
to_node.visited = True
# Record the node that got us here.
to_node.from_node = node
# Mark the link as part of the tree.
link.visited = True
# Push the node onto the stack.
stack.append(to_node)
# Add the link's cost to the total cost.
total_cost += link.cost
# See if the network is connected.
is_connected = True
for node in self.all_nodes:
if not node.visited:
is_connected = False
break
return total_cost, is_connected
def make_minimal_spanning_tree(self, root):
""""
Build a minimal spanning tree. Return its total cost and whether it is connected.
When it adds a node to the spanning tree, the algorithm
also adds its links that lead outside of the tree to a list.
Later it searches that list for a minimal link.
"""
# Reset the network.
self.reset_network()
# The total cost of the links in the spanning tree.
total_cost = 0
# Add the root node's links to the link candidate list.
candidate_links = []
for link in root.links:
candidate_links.append(link)
# Visit the root node.
root.visited = True
# Process the list until it's empty.
while len(candidate_links) > 0:
# Find the link with the lowest cost.
best_link = candidate_links[0]
best_cost = best_link.cost
for i in range(1, len(candidate_links)):
if candidate_links[i].cost < best_cost:
# Save this improvement.
best_link = candidate_links[i]
best_cost = best_link.cost
# Remove the link from the list.
candidate_links.remove(best_link)
# Get the node at the other end of the link.
to_node = best_link.node1
# See if the link's node is still unmarked.
if not to_node.visited:
# Use the link.
best_link.visited = True
total_cost += best_link.cost
to_node.visited = True
# Record the node that got us here.
to_node.from_node = best_link.node0
# Process to_node's links.
for new_link in to_node.links:
# If the node hasn't been visited,
# add the link to the list.
if not new_link.node1.visited:
candidate_links.append(new_link)
# See if the network is connected.
is_connected = True
for node in self.all_nodes:
if not node.visited:
is_connected = False
break
return total_cost, is_connected
def find_any_path(self, from_node, to_node):
"""
Find any path between the two nodes. Return the path's total cost,
the nodes in the path, and the links in the path.
"""
# Make a spanning tree.
self.make_spanning_tree(from_node)
# Follow the tree's links back from to_node to from_node.
return self.find_spanning_tree_path(from_node, to_node)
def find_label_setting_path_tree(self, from_node):
"""
Find a shortest path tree rooted at from_node
by using a label setting algorithm. Return the tree's total cost.
"""
# Reset the network.
self.reset_network()
# Keep track of the number of nodes in the tree.
num_done = 0
# Add the start node to the shortest path tree.
from_node.visited = True
from_node.distance = 0
from_node.text = f"{num_done}"
num_done += 1
# Track the tree's total cost.
cost = 0
# Make the candidate list.
candidate_links = []
# Add the start node's links to the candidate list.
for link in from_node.links:
candidate_links.append(link)
# Make a shortest path tree.
while len(candidate_links) > 0:
# Find the best link.
best_link = None
best_cost = infinity
for i in range(len(candidate_links) - 1, -1, -1):
test_link = candidate_links[i]
# See if the link leads outside the tree.
if test_link.node1.visited:
# Remove this link.
del candidate_links[i]
else:
# See if this link is an improvement.
test_cost = test_link.node0.distance + test_link.cost
if test_cost < best_cost:
best_cost = test_cost
best_link = test_link
# If we found no link, then the candidate
# list must be empty and we're done.
if best_link == None:
assert len(candidate_links) == 0
break
# Use this link.
# Remove it from the candidate list.
candidate_links.remove(best_link)
# Add the node to the tree.
best_node = best_link.node1
best_node.distance = best_link.node0.distance + best_link.cost
best_node.visited = True
best_link.visited = True
best_node.from_node = best_link.node0
best_node.text = f"{num_done}"
num_done += 1
# Add the node's links to the tree.
for new_link in best_node.links:
if not new_link.node1.visited:
candidate_links.append(new_link)
# Add the link's cost to the tree's total cost.
cost += best_link.cost
# Return the total cost.
return cost
def find_label_setting_path(self, from_node, to_node):
"""
Find a shortest path between the two nodes
by using a label setting algorithm.
Return the path's total cost, nodes, and links.
"""
# Build a shortest path tree.
self.find_label_setting_path_tree(from_node)
# Follow the tree's links back from to_node to from_node.
return self.find_spanning_tree_path(from_node, to_node)
def find_spanning_tree_path(self, from_node, to_node):
""""
Follow a spanning tree's links to find a path from from_node to to_node.
Return the nodes and links in the path.
"""
# Follow the tree's links back from to_node to from_node.
path_nodes = []
path_links = []
current_node = to_node
while current_node != from_node:
# Add this node to the path.
path_nodes.append(current_node)
# Find the previous node.
prev_node = current_node.from_node
# Find the link that leads to current_node.
prev_link = None
for link in prev_node.links:
if link.node1 == current_node:
prev_link = link
break
# Make sure we found the link.
assert prev_link != None
# Add the link to the path.
path_links.append(prev_link)
# Move to the next node.
current_node = prev_node
# Add the start node.
path_nodes.append(from_node)
# Reverse the order of the nodes and links.
path_nodes.reverse()
path_links.reverse()
# Unmark all nodes and links.
self.deselect_nodes()
self.deselect_links()
# Marks the path's nodes and links.
for node in path_nodes:
node.visited = True
for link in path_links:
link.visited = True
# Calculate the cost of the path.
cost = 0
for link in path_links:
cost += link.cost
# Return the cost.
return cost, path_nodes, path_links
def find_label_correcting_path_tree(self, from_node):
"""
Find a shortest path tree rooted at from_node
by using a label correcting algorithm.
Return the tree's total cost.
"""
# Reset the network.
self.reset_network()
# Set all nodes' distances to infinity and their labels to 0.
for node in self.all_nodes:
node.distance = infinity
node.text = "0"
# Add the start node to the shortest path tree.
from_node.visited = True
from_node.distance = 0
# Make the candidate list.
candidate_links = []
# Add the start node's links to the candidate list.
for link in from_node.links:
candidate_links.append(link)
# Make a shortest path tree.
while len(candidate_links) > 0:
# Use the first link in the candidate list.
link = candidate_links.pop(0)
# See if this link improves its destination node's distance.
new_distance = link.node0.distance + link.cost
to_node = link.node1
if new_distance < to_node.distance:
# This is an improvement.
# Update the node's distance.
to_node.distance = new_distance
# Update the node's from_node and from_link.
to_node.from_node = link.node0
to_node.from_link = link
# Update the node's label.
num_updates = int(to_node.text)
num_updates += 1
to_node.text = f"{num_updates}"
# Add the node's links to the candidate list.
for new_link in to_node.links:
candidate_links.append(new_link)
# Set the visited properties for the visited nodes and links.
cost = 0
for node in self.all_nodes:
node.visited = True
if node.from_link != None:
node.from_link.visited = True
cost += node.from_link.cost
# Return the total cost.
return cost
def find_label_correcting_path(self, from_node, to_node):
"""
Find a shortest path between the two nodes
by using a label correcting algorithm.
Return the path's total cost, nodes, and links.
"""
# Build a shortest path tree.
self.find_label_correcting_path_tree(from_node)
# Follow the tree's links back from to_node to from_node.
return self.find_spanning_tree_path(from_node, to_node)
def find_all_pairs_paths(self):
""" Find all pairs shortest paths. Return the distance and via arrays."""
# Renumber the nodes.
num_nodes = len(self.all_nodes)
for i in range(num_nodes):
self.all_nodes[i].index = i
# Initialize the distance array.
distance = [[infinity for i in range(num_nodes)] for j in range(num_nodes)]
# The distance from a node to itself is 0.
for i in range(num_nodes):
distance[i][i] = 0
# Set distances for links.
for node in self.all_nodes:
for link in node.links:
from_node = link.node0.index
to_node = link.node1.index
if distance[from_node][to_node] > link.cost:
distance[from_node][to_node] = link.cost
# Initialize the via array.
via = [[-1 for i in range(num_nodes)] for j in range(num_nodes)]
# Set via[i][j] = j if there is a link from i to j.
for i in range(num_nodes):
for j in range(num_nodes):
if distance[i][j] < infinity:
via[i][j] = j
# Find improvements.
for via_node in range(num_nodes):
for from_node in range(num_nodes):
for to_node in range(num_nodes):
new_dist = \
distance[from_node][via_node] + \
distance[via_node][to_node]
if new_dist < distance[from_node][to_node]:
# This is an improved path. Update it.
distance[from_node][to_node] = new_dist
via[from_node][to_node] = via_node
return distance, via
def find_all_pairs_path(self, distance, via, start_node, end_node):
""" Return an all pairs path."""
# See if there is a path between these nodes.
if distance[start_node][end_node] == infinity:
return None
# Get the via node for this path.
via_node = via[start_node][end_node]
# Make the list to return.
path = []
# See if there is a direct connection.
if via_node == end_node:
# There is a direct connection.
# Return a list that contains only end_node.
path.append(self.all_nodes[end_node])
else:
# There is no direct connection.
# Return start_node --> via_node plus via_node --> end_node.
path.extend(self.find_all_pairs_path(distance, via, start_node, via_node))
path.extend(self.find_all_pairs_path(distance, via, via_node, end_node))
return path
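# Hedged usage sketch (added; not part of the original class). Node names and
# coordinates are made up for illustration.
#
# network = Network()
# a = Node("A", (20, 20), 0)
# b = Node("B", (120, 20), 1)
# network.all_nodes.extend([a, b])
# network.make_links(a, b)  # undirected pair of links
# cost, path_nodes, path_links = network.find_any_path(a, b)
# distance, via = network.find_all_pairs_paths()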
class App:
def kill_callback(self):
self.window.destroy()
def __init__(self):
self.mode = Modes.none
# The nodes selected by the user while adding a link or finding a path.
self.node0 = None
self.node1 = None
# The currently loaded network.
self.the_network = Network()
# User interface.
self.window = tk.Tk()
self.window.title("network_maker")
self.window.protocol("WM_DELETE_WINDOW", self.kill_callback)
self.window.geometry("500x500")
# Menu.
menubar = tk.Menu(self.window)
self.window.config(menu=menubar)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=self.new, accelerator="Ctrl+N")
filemenu.add_command(label="Open", command=self.open, accelerator="Ctrl+O")
filemenu.add_command(label="Save As", command=self.save_as, accelerator="Ctrl+S")
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.kill_callback)
menubar.add_cascade(label="File", menu=filemenu)
# Tool strip.
self.buttons = []
self.images = []
toolstrip = tk.Frame(self.window)
toolstrip.pack(padx=5, pady=(5, 0), side=tk.TOP, fill=tk.X)
self.deselect_button = self.make_button(toolstrip, "deselect.png", self.deselect)
self.make_separator(toolstrip)
self.add_node_button = self.make_button(toolstrip, "add_node.png", self.add_nodes)
self.add_link_button = self.make_button(toolstrip, "add_link.png", self.add_links)
self.add_link2_button = self.make_button(toolstrip, "add_link2.png", self.add_links2)
self.make_separator(toolstrip)
self.df_traversal_button = self.make_button(toolstrip, "df_traversal.png", self.df_traversal)
self.bf_traversal_button = self.make_button(toolstrip, "bf_traversal.png", self.bf_traversal)
self.connected_components_button = self.make_button(toolstrip, "connected_components.png", self.connected_components)
self.spanning_tree_button = self.make_button(toolstrip, "spanning_tree.png", self.spanning_tree)
self.minimal_spanning_tree_button = self.make_button(toolstrip, "minimal_spanning_tree.png", self.minimal_spanning_tree)
self.any_path_button = self.make_button(toolstrip, "any_path.png", self.any_path)
self.label_setting_tree_button = self.make_button(toolstrip, "label_setting_tree.png", self.label_setting_tree)
self.label_setting_path_button = self.make_button(toolstrip, "label_setting_path.png", self.label_setting_path)
self.label_correcting_tree_button = self.make_button(toolstrip, "label_correcting_tree.png", self.label_correcting_tree)
self.label_correcting_path_button = self.make_button(toolstrip, "label_correcting_path.png", self.label_correcting_path)
self.all_pairs_button = self.make_button(toolstrip, "all_pairs.png", self.all_pairs)
# The status label.
self.status_label = tk.Label(self.window, anchor=tk.W, relief=tk.RIDGE)
self.status_label.pack(padx=5, pady=2, side=tk.BOTTOM, fill=tk.X)
# The drawing area.
self.canvas = tk.Canvas(self.window, bg="white", borderwidth=2, relief=tk.SUNKEN, cursor="crosshair")
self.canvas.pack(padx=5, pady=(5, 0), side=tk.TOP, fill=tk.BOTH, expand=True)
# Bind some keys.
self.window.bind('<Control-n>', self.key_new)
self.window.bind('<Control-o>', self.key_open)
self.window.bind('<Control-s>', self.key_save_as)
# Catch mouse clicks.
self.canvas.bind("<Button-1>", self.mouse1_click)
self.canvas.bind("<Button-3>", self.mouse3_click)
# Force focus so Alt+F4 closes this window and not the Python shell.
self.window.focus_force()
self.window.mainloop()
def key_new(self, event):
self.new()
def new(self):
""" Start a new network."""
self.the_network = Network()
self.draw_canvas()
def key_open(self, event):
self.open()
def open(self):
""" Load a network file."""
filename = askopenfilename(defaultextension=".net",
filetypes=(("Network Files", "*.net"), ("All Files", "*.*")))
if filename:
try:
# Load the network.
network = Network.load_network(filename)
# Start using the new network.
self.the_network = network
# Draw the new network.
self.draw_canvas()
except Exception as e:
messagebox.showinfo("Load Error", str(e))
return
def key_save_as(self, event):
self.save_as()
def save_as(self):
""" Save the network in a file."""
filename = asksaveasfilename(defaultextension=".net",
filetypes=(("Network Files", "*.net"), ("All Files", "*.*")))
if filename:
# Save the network.
try:
# Save the network.
self.the_network.save_network(filename)
except Exception as e:
messagebox.showinfo("Load Error", str(e))
return
def make_button(self, toolstrip, file, command):
button = tk.Button(toolstrip, command=command)
self.buttons.append(button)
image = ImageTk.PhotoImage(file=file)
self.images.append(image)
button.config(image=image)
button.pack(padx=2, pady=2, side=tk.LEFT)
return button
def make_separator(self, toolstrip):
separator = ttk.Separator(toolstrip, orient=tk.VERTICAL)
separator.pack(padx=2, pady=2, side=tk.LEFT, fill=tk.Y)
""" Toolstrip buttons."""
def deselect(self):
self.select_tool(None, Modes.none, "")
def add_nodes(self):
self.select_tool(self.add_node_button, Modes.add_node, "Click to add a node")
def add_links(self):
self.select_tool(self.add_link_button, Modes.add_link1, "Directed Link: left-click to select start node, right-click to select end node")
def add_links2(self):
self.select_tool(self.add_link2_button, Modes.add_link2, "Undirected Link: left- and right-click to select the two nodes")
def df_traversal(self):
self.select_tool(self.df_traversal_button, Modes.df_traversal, "Depth-First Traversal: click a root node")
def bf_traversal(self):
self.select_tool(self.bf_traversal_button, Modes.bf_traversal, "Breadth-First Traversal: click a root node")
def connected_components(self):
""" Find the connected components."""
# Deselect all tools.
self.select_tool(None, Modes.none, "")
# Get the components.
components = self.the_network.get_connected_components()
# Display the components.
txt = ""
for component in components:
component_txt = ""
for component_node in component:
component_txt += f" {component_node}"
txt += "{" + component_txt[1:] + "} "
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
def spanning_tree(self):
self.select_tool(self.spanning_tree_button, Modes.spanning_tree, "Spanning Tree")
def minimal_spanning_tree(self):
self.select_tool(self.minimal_spanning_tree_button, Modes.minimal_spanning_tree, "Minimal Spanning Tree")
def any_path(self):
self.select_tool(self.any_path_button, Modes.any_path, "Any Path")
def label_setting_tree(self):
self.select_tool(self.label_setting_tree_button, Modes.label_setting_tree, "Label Setting Tree")
def label_setting_path(self):
self.select_tool(self.label_setting_path_button, Modes.label_setting_path, "Label Setting Path")
def label_correcting_tree(self):
self.select_tool(self.label_correcting_tree_button, Modes.label_correcting_tree, "Label Correcting Tree")
def label_correcting_path(self):
self.select_tool(self.label_correcting_path_button, Modes.label_correcting_path, "Label Correcting Path")
def all_pairs(self):
# Deselect all tools.
self.select_tool(None, Modes.none, "")
self.status_label["text"] = "Working..."
self.status_label.update()
# Find all pairs shortest paths.
distance, via = self.the_network.find_all_pairs_paths()
# Display the arrays.
print("\nFinal arrays:")
print("Distance:")
print(array_to_string(distance, 4, False))
print("Via:")
print(array_to_string(via, 3, True))
# Display all of the paths.
num_nodes = len(self.the_network.all_nodes)
for i in range(num_nodes):
for j in range(num_nodes):
if i != j:
path = self.the_network.find_all_pairs_path(distance, via, i, j)
start_node = self.the_network.all_nodes[i]
end_node = self.the_network.all_nodes[j]
print(f"{start_node} --> {end_node} ", end="")
if path == None:
print("No path")
else:
print(f"[{distance[i][j]}] : ", end="")
for via_node in path:
print(f"{via_node} ", end="")
print()
self.status_label["text"] = "See the Output Window for results."
def select_tool(self, button, mode, status):
""" Select this tool."""
if self.mode == mode:
    # Selecting the active tool a second time deselects it.
    mode = Modes.none
    button = None
    status = ""
self.untoggle_buttons(button)
self.mode = mode
self.status_label["text"] = status
# Reset and redraw the network.
self.the_network.reset_network()
self.draw_canvas()
def untoggle_buttons(self, button):
""" Untoggle all buttons except this one."""
for test_button in self.buttons:
    if test_button == button:
        test_button.configure(relief=tk.SUNKEN)
    else:
        test_button.configure(relief=tk.RAISED)
# Clear the status label.
self.status_label["text"] = ""
def draw_canvas(self):
""" Draw the network."""
self.canvas.delete(tk.ALL)
show_node_text = False
show_costs = False
show_capacities = False
self.the_network.draw(self.canvas, show_node_text, show_costs, show_capacities,
1, "blue", "blue", 3, "red", "red",
"white", "blue", "blue",
"lightblue", "red", "red")
def mouse1_click(self, event):
self.mouse_click(event, 1)
def mouse3_click(self, event):
self.mouse_click(event, 3)
def mouse_click(self, event, button_number):
""" Add a node or link if appropriate."""
location = (event.x, event.y)
if self.mode == Modes.add_node:
# Add a node.
index = len(self.the_network.all_nodes)
name = number_to_letters(index)
node = Node(name, location, index)
self.the_network.all_nodes.append(node)
self.draw_canvas()
elif (
(self.mode == Modes.add_link1) or
(self.mode == Modes.add_link2)):
# Add a link.
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
if button_number == 1:
self.node0 = node
else:
self.node1 = node
# See if we have both nodes.
if ((self.node0 != None) and (self.node1 != None)):
# See if the nodes are the same.
if self.node0 == self.node1:
beep()
else:
# Make the link.
if self.mode == Modes.add_link1:
self.the_network.make_link(self.node0, self.node1)
else:
self.the_network.make_links(self.node0, self.node1)
# We're done with this link.
self.node0 = None
self.node1 = None
# Redraw.
self.draw_canvas()
elif self.mode == Modes.df_traversal:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Traverse the network.
traversal, is_connected = self.the_network.depth_first_traverse(node)
# Display the traversal.
txt = "Traversal: "
for traversal_node in traversal:
txt += f" {traversal_node}"
if is_connected:
txt += " Connected."
else:
txt += " Not connected."
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.bf_traversal:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Traverse the network.
traversal, is_connected = self.the_network.breadth_first_traverse(node)
# Display the traversal.
txt = "Traversal: "
for traversal_node in traversal:
txt += f" {traversal_node}"
if is_connected:
txt += " Connected."
else:
txt += " Not connected."
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.spanning_tree:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Build a spanning tree.
cost, is_connected = self.the_network.make_spanning_tree(node)
if is_connected:
txt = "Connected. "
else:
txt = "Not connected. "
txt += f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.minimal_spanning_tree:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Build a spanning tree.
cost, is_connected = self.the_network.make_minimal_spanning_tree(node)
if is_connected:
txt = "Connected. "
else:
txt = "Not connected. "
txt += f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.any_path:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
if button_number == 1:
self.node0 = node
else:
self.node1 = node
# See if we have both nodes.
if ((self.node0 != None) and (self.node1 != None)):
# See if the nodes are the same.
if self.node0 == self.node1:
beep()
else:
# Find a path between the nodes.
cost, path_nodes, path_links = self.the_network.find_any_path(self.node0, self.node1)
txt = "Path: "
for path_node in path_nodes:
txt += f"{path_node} "
txt += f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.label_setting_tree:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Build a label setting shortest path tree.
cost = self.the_network.find_label_setting_path_tree(node)
txt = f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.label_setting_path:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
if button_number == 1:
self.node0 = node
else:
self.node1 = node
# See if we have both nodes.
if ((self.node0 != None) and (self.node1 != None)):
# See if the nodes are the same.
if self.node0 == self.node1:
beep()
else:
# Find a path between the nodes.
cost, path_nodes, path_links = self.the_network.find_label_setting_path(self.node0, self.node1)
txt = "Path: "
for path_node in path_nodes:
txt += f"{path_node} "
txt += f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.label_correcting_tree:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
# Build a label correcting shortest path tree.
cost = self.the_network.find_label_correcting_path_tree(node)
txt = f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
elif self.mode == Modes.label_correcting_path:
# See if there is a node here.
node = self.the_network.find_node(location)
if node == None:
beep()
else:
if button_number == 1:
self.node0 = node
else:
self.node1 = node
# See if we have both nodes.
if ((self.node0 != None) and (self.node1 != None)):
# See if the nodes are the same.
if self.node0 == self.node1:
beep()
else:
# Find a path between the nodes.
cost, path_nodes, path_links = self.the_network.find_label_correcting_path(self.node0, self.node1)
txt = "Path: "
for path_node in path_nodes:
txt += f"{path_node} "
txt += f"Total cost: {cost}"
self.status_label["text"] = txt
# Redraw the network.
self.draw_canvas()
if __name__ == '__main__':
app = App()
# app.root.destroy()
|
StarcoderdataPython
|
8179936
|
<reponame>Tea-n-Tech/chia-tea
from chia.rpc.harvester_rpc_client import HarvesterRpcClient
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from ....models.ChiaWatchdog import ChiaWatchdog
from ....utils.logger import log_runtime_async
from .shared_settings import API_EXCEPTIONS
@log_runtime_async(__file__)
async def update_from_harvester(chia_dog: ChiaWatchdog):
"""Updates the chia dog with harvester data
Parameters
----------
chia_dog : ChiaWatchdog
watchdog instance to be modified
"""
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", exit_on_error=False)
self_hostname = config["self_hostname"]
harvester_rpc_port = config["harvester"]["rpc_port"]
harvester_client = await HarvesterRpcClient.create(
self_hostname, uint16(harvester_rpc_port), DEFAULT_ROOT_PATH, config
)
plots_response = await harvester_client.get_plots()
chia_dog.harvester_service.is_running = True
if plots_response["success"]:
chia_dog.harvester_service.plots = plots_response["plots"]
chia_dog.harvester_service.failed_to_open_filenames = plots_response[
"failed_to_open_filenames"
]
chia_dog.harvester_service.not_found_filenames = plots_response["not_found_filenames"]
chia_dog.harvester_service.plot_directories = await harvester_client.get_plot_directories()
# pylint: disable=catching-non-exception
except API_EXCEPTIONS:
chia_dog.harvester_service.is_running = False
finally:
if "harvester_client" in locals():
harvester_client.close()
await harvester_client.await_closed()
chia_dog.harvester_service.is_ready = True
|
StarcoderdataPython
|
246992
|
"""\
This implements a command line interpreter (CLI) for the concur API.
OAuth data is kept in a JSON file, for easy portability between different
programming languages.
Currently, the initialization of OAuth requires the user to copy a URL
into a web browser, then copy the URL of the resulting page back to this
script.
"""
copyright = """
Copyright (c) 2013 <NAME> <<EMAIL>>
All Rights Reserved.
Licensed under the Academic Free License (AFL 3.0)
http://opensource.org/licenses/afl-3.0
"""
from argparse import ArgumentParser
from cmd import Cmd as _Cmd
from datetime import datetime
from functools import wraps as _wraps
from pprint import pprint as _pprint
import json as _json
import re
from ValidateElements import *
try:
from concur import ConcurClient, ConcurAPIError
except ImportError:
from sys import path
from os.path import join, normpath
# Try looking in the parent of this script's directory.
path.insert(0, normpath(join(path[0], '..')))
from concur import ConcurClient, ConcurAPIError
import concur._xml2json as x2j
def mk_parser(*args):
parser = ArgumentParser(
prog='',
description='',
add_help=False,
)
head_is_string = lambda lst: isinstance(lst[0], basestring)
args = list(args)
args.append(None) # sentinel value
while args:
dest = args.pop(0)
if dest is None:
break
flags = {} if head_is_string(args) else args.pop(0)
if isinstance(dest, basestring):
dest = (dest,)
parser.add_argument(*dest, **flags)
return parser
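# Added note: mk_parser flattens (dest, options) pairs into add_argument calls,
# so, for example,
#     mk_parser('path', {'nargs': '+'}, ('-o', '--options'), {'nargs': '*'})
# builds the same parser as
#     parser.add_argument('path', nargs='+')
#     parser.add_argument('-o', '--options', nargs='*')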
def _syntax(parser, dont_split=False, f=None):
'''Decorator that accepts an ArgumentParser, then mutates a
function that is accepts a string to instead accept a Namespace.'''
if f is None:
from copy import copy
from functools import partial
return partial(_syntax, copy(parser), dont_split)
parser.prog = f.func_name[3:]
parser.description = f.func_doc or 'No description available'
f.func_doc = parser.format_help()
@_wraps(f)
def wrapper(self, line):
args = [line] if dont_split else line.split()
return f(self, parser.parse_args(args))
return wrapper
def _get(filename, default):
from os.path import expanduser
return expanduser(filename if filename else default)
def _set(name, definition, dict, allow_creation=True):
'''Helper function to set a value in a hash'''
def show(item):
'''Helper function to display a key-value pair'''
if isinstance(item[1], basestring):
print '%s: %s' % item
if name is None:
for item in sorted(dict.items()):
show(item)
elif len(definition) == 0:
try:
show((name, dict[name]))
except KeyError:
pass
elif allow_creation or isinstance(dict.get(name), basestring):
dict[name] = ' '.join(definition)
else:
        print 'unknown key %r' % name
def _unset(names, dict):
'''Helper function to remove a value from a hash'''
for name in names:
try:
del dict[name]
except KeyError:
pass
no_args = mk_parser()
filename = mk_parser('filename', {'nargs':'?'})
value = mk_parser('value', {'nargs':'?'})
define = mk_parser('name', {'nargs':'?'}, 'definition', {'nargs':'*'})
undefine = mk_parser('names', {'nargs':'+'})
key_value = lambda x: x.split('=', 1) # turn 'foo=bar' into ('foo', 'bar')
http_request = mk_parser('path', {'nargs':'+'},
('-o', '--options'), {'nargs':'*', 'type': key_value, 'default': ()})
options = mk_parser('options', {'nargs':'*', 'type': key_value, 'default': ()})
class ConcurCmd(_Cmd):
config_file = '~/.concur_cli.rc'
oauth_file = '~/concur_oauth.json'
def __init__(self, config_file=None):
'''Initializes the interpreter.'''
self.client = ConcurClient()
self.aliases = {}
self.open_files = []
self.do_load(self.config_file)
return _Cmd.__init__(self)
def onecmd(self, line):
try:
return _Cmd.onecmd(self, line)
except ConcurAPIError as error:
print "%s: %s" % (type(error).__name__, error[0])
print error[1]
except Exception as error:
print "%s: %s" % (type(error).__name__, error)
import traceback
traceback.print_exc()
def default(self, line):
'''Handle aliases.'''
parts = line.split(None, 1)
if len(parts) > 0 and parts[0] in self.aliases:
newline = self.aliases[parts[0]]
if len(parts) > 1:
newline += ' ' + parts[1]
return self.onecmd(newline)
return _Cmd.default(self, line)
# Simple commands
@_syntax(no_args)
def do_quit(self, namespace):
'''Exits the interpreter.'''
return True
@_syntax(no_args)
def do_copyright(self, namespace):
'''Displays copyright and licensing information.'''
print copyright
@_syntax(no_args)
def do_examples(self, namespace):
'''Displays example commands.'''
print '''\
These are some commands to try.
\tget_Forms
\tget_Forms FormCode=RPTINFO
\tget_Fields FormId=n5oqVNsQ$soy2ftQuy$sU9oHBDNCFPyPQr9
\tcreate_report Name=MMMM+Expenses Purpose=All+expenses+for+MMM,+YYYY Comment=Includes+Client+Meetings. UserDefinedDate=YYYY-MM-DD+HH:MM:SS.0
\tget expense expensereport v2.0 Reports -o status=ACTIVE ReportCurrency=USD
\tget expense expensereport v2.0 report <ReportID>'''
@_syntax(value, dont_split=True)
def do_note(self, namespace):
'''Comment.'''
pass
@_syntax(value, dont_split=True)
def do_echo(self, namespace):
'''Displays information to the user.'''
print namespace.value
# Commands related to aliases.
@_syntax(define)
def do_alias(self, namespace):
'''Manage aliases.'''
_set(namespace.name, namespace.definition, self.aliases)
@_syntax(undefine)
def do_unalias(self, namespace):
'''Delete aliases.'''
_unset(namespace.names, self.aliases)
@_syntax(filename, dont_split=True)
def do_save(self, namespace):
'''Save the current configuration as a list of commands.'''
config_file = _get(namespace.filename, self.config_file)
with open(config_file, 'w') as config:
for item in self.aliases.items():
print >>config, 'alias %s %s' % item
#print >>config, 'oload %s' % self.oauth_file # TODO
@_syntax(filename, dont_split=True)
def do_load(self, namespace):
'''Run commands from a file.'''
from os.path import exists, expanduser, join
config_file = _get(namespace.filename, self.config_file)
if config_file in self.open_files:
print 'already processing %s' % config_file
return
if exists(config_file):
self.open_files.append(config_file)
with open(config_file, 'r') as config:
for line in config:
self.onecmd(line)
self.open_files.pop()
# Commands related to OAuth.
@_syntax(value, dont_split=True)
def do_client_id(self, namespace):
'''Displays or sets the value.'''
if namespace.value:
self.client.client_id = namespace.value
elif self.client.client_id:
print 'client_id =', self.client.client_id
else:
print 'The client id is not set.'
@_syntax(value, dont_split=True)
def do_client_secret(self, namespace):
'''Displays or sets the value.'''
if namespace.value:
self.client.client_secret = namespace.value
elif self.client.client_secret:
print 'client_secret =', self.client.client_secret
else:
print 'The client secret is not set.'
@_syntax(value, dont_split=True)
def do_access_token(self, namespace):
'''Displays or sets the value.'''
from urlparse import urlparse, parse_qs
client = self.client
if namespace.value:
parts = urlparse(namespace.value)
code = parse_qs(parts.query)['code'][0]
client.access_token = client.get_oauth_token(code)
elif client.access_token:
print 'access_token =', client.access_token
else:
print 'The access token is not set.'
print 'Enter the URL below in a web browser and follow the instructions.'
print ' ', client.build_oauth_url()
print 'Once the web browser redirects, copy the complete URL and'
print 'use it to re-run this command.'
@_syntax(filename, dont_split=True)
def do_osave(self, namespace):
'''Saves OAuth information into a JSON file.'''
oauth_file = _get(namespace.filename, self.oauth_file)
with open(oauth_file, 'w') as fp:
_json.dump(self.client.__dict__, fp)
@_syntax(filename, dont_split=True)
def do_oload(self, namespace):
'''Loads OAuth information from a JSON file.'''
from os.path import exists, expanduser, join
oauth_file = _get(namespace.filename, self.oauth_file)
if exists(oauth_file):
with open(oauth_file, 'r') as fp:
self.client.__dict__.update(_json.load(fp))
# Commands related to the REST API.
@_syntax(http_request)
def do_get(self, namespace):
'''Issues an HTTP GET request'''
_pprint(self.client.get('/'.join(namespace.path), **dict(namespace.options)))
@_syntax(http_request)
def do_post(self, namespace):
'''Issues an HTTP POST request'''
_pprint(self.client.post('/'.join(namespace.path))) #, **namespace.options))
# Commands specific to Concur.
@_syntax(options)
def do_create_report(self, namespace):
'''Creates a new expense report'''
_pprint(self.client.post(
'expense/expensereport/v1.1/Report',
Report=validate_report_elements(namespace.options),
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03',
))
@_syntax(options)
def do_quickexpense(self, namespace):
'''Creates a new quick expense'''
_pprint(self.client.post(
'expense/expensereport/v1.0/quickexpense/',
Report=validate_quickexpense_elements(namespace.options),
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2010/09',
))
# See also: https://developer.concur.com/api-documentation/draft-documentation/attendee-resource-draft/attendee-resource-get-draft
@_syntax(options)
def do_get_attendees_by_id(self, namespace):
'''Get attendees_by_id''' # TODO
options = validate_attendees_by_id(namespace.options)
_pprint(self.client.get(
'expense/v2.0/attendees/{attendees id}' % options,
))
# See also: https://developer.concur.com/api-documentation/draft-documentation/e-receipt-service-developer-preview/e-receipt-or-e-invoice-res
@_syntax(options)
def do_get_e_receiptandinvoice_by_id(self, namespace):
'''Get e-receiptandinvoice_by_id''' # TODO
options = validate_e_receiptandinvoice_by_id(namespace.options)
_pprint(self.client.get(
'e-receiptandinvoice/v1.0/{e-receiptandinvoice id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/company-card-transaction-0
@_syntax(options)
def do_get_CardCharges(self, namespace):
'''Get CardCharges''' # TODO
options = validate_CardCharges(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/CardCharges' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-delegator-resour-0
@_syntax(options)
def do_get_Delegators(self, namespace):
'''Get Delegators''' # TODO
options = validate_Delegators(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/Delegators' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-attendee-r-0
@_syntax(options)
def do_get_Attendees(self, namespace):
'''Get Attendees''' # TODO
options = validate_Attendees(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
))
@_syntax(options)
def do_get_Attendees_by_id(self, namespace):
'''Get Attendees_by_id''' # TODO
options = validate_Attendees_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees/{Attendees id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-attendee-r-1
@_syntax(options)
def do_post_Attendees(self, namespace):
'''Post Attendees''' # TODO
options = validate_Attendees(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_Attendees_1(self, namespace):
'''Post Attendees_1''' # TODO
options = validate_Attendees_1(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Attendees' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-itemizatio-0
@_syntax(options)
def do_post_Itemization(self, namespace):
'''Post Itemization''' # TODO
options = validate_Itemization(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}/Itemization' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-entry-resource/exp
@_syntax(options)
def do_get_entry_by_id(self, namespace):
'''Get entry_by_id''' # TODO
options = validate_entry_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/{report id}/entry/{entry id}' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/expense-report-web-service-new-format/expense-report-header-re-0
@_syntax(options)
def do_post_report(self, namespace):
'''Post report''' # TODO
options = validate_report(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/api/expense/expensereport/v1.1/report' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_batch(self, namespace):
'''Post batch''' # TODO
options = validate_batch(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/api/expense/expensereport/v1.1/report/batch' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/travel-profile-web-service-new-format/form-payment-resource/form
@_syntax(options)
def do_get_fop(self, namespace):
'''Get fop''' # TODO
options = validate_fop(namespace.options)
_pprint(self.client.get(
'travelprofile/v1.0/fop' % options,
))
# See also: https://developer.concur.com/api-documentation/new-portal-format/travel-profile-web-service-new-format/loyalty-program-resource/l
@_syntax(options)
def do_post_loyalty(self, namespace):
'''Post loyalty''' # TODO
options = validate_loyalty(namespace.options)
_pprint(self.client.post(
'travelprofile/v1.0/loyalty' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_loyalty_1(self, namespace):
'''Post loyalty_1''' # TODO
options = validate_loyalty_1(namespace.options)
_pprint(self.client.post(
'travelprofile/v1.0/loyalty' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/oauth-20-0
@_syntax(options)
def do_get_User(self, namespace):
'''Get User''' # TODO
options = validate_User(namespace.options)
_pprint(self.client.get(
'user/v1.0/User' % options,
))
@_syntax(options)
def do_get_User_1(self, namespace):
'''Get User_1''' # TODO
options = validate_User_1(namespace.options)
_pprint(self.client.get(
'user/v1.0/User' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/attendee/attendee-resource/attendee-resource-get
@_syntax(options)
def do_get_attendees_by_id_1(self, namespace):
'''Get attendees_by_id_1''' # TODO
options = validate_attendees_by_id_1(namespace.options)
_pprint(self.client.get(
'expense/v2.0/attendees/{attendees id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/attendee-list/attendee-type-resource/attendee-type-resource-get
@_syntax(options)
def do_get_type(self, namespace):
'''Get type''' # TODO
options = validate_type(namespace.options)
_pprint(self.client.get(
'expense/attendee/v1.0/type' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-entry-attendee-resource/v20-expense-entry-atte
@_syntax(options)
def do_get_attendees(self, namespace):
'''Get attendees''' # TODO
options = validate_attendees(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/report/{report id}/entry/{entry id}/attendees' % options,
))
@_syntax(options)
def do_get_Attendees_1(self, namespace):
'''Get Attendees_1''' # TODO
options = validate_Attendees_1(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/report/{report id}/entry/{entry id}/Attendees' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-entry-resource/expense-entry-resource-post
@_syntax(options)
def do_post_entry(self, namespace):
'''Post entry''' # TODO
options = validate_entry(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/entry' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-form-field-resource/expense-form-field-resourc
@_syntax(options)
def do_get_Fields(self, namespace):
'''Retrieves the details of the configured form fields for the specified form'''
options = validate_Fields(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/report/Form/%(FormId)s/Fields' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-form-resource/expense-form-resource-get
@_syntax(options)
def do_get_Forms(self, namespace):
'''Retrieves the list of configured form types or the configured forms for the specified form type'''
options = validate_Forms(namespace.options)
options.setdefault('FormCode', '')
_pprint(self.client.get(
'expense/expensereport/v1.1/report/Forms/%(FormCode)s' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-group-configuration-resource/expense-group-con
@_syntax(options)
def do_get_expensereport_by_id(self, namespace):
'''Get expensereport_by_id''' # TODO
options = validate_expensereport_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/{expensereport id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-report-resource/expense-report-resource-get
@_syntax(options)
def do_get_Reports(self, namespace):
'''Get Reports''' # TODO
options = validate_Reports(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_1(self, namespace):
'''Get Reports_1''' # TODO
options = validate_Reports_1(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_2(self, namespace):
'''Get Reports_2''' # TODO
options = validate_Reports_2(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_3(self, namespace):
'''Get Reports_3''' # TODO
options = validate_Reports_3(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_4(self, namespace):
'''Get Reports_4''' # TODO
options = validate_Reports_4(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_5(self, namespace):
'''Get Reports_5''' # TODO
options = validate_Reports_5(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_6(self, namespace):
'''Get Reports_6''' # TODO
options = validate_Reports_6(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_7(self, namespace):
'''Get Reports_7''' # TODO
options = validate_Reports_7(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_Reports_8(self, namespace):
'''Get Reports_8''' # TODO
options = validate_Reports_8(namespace.options)
_pprint(self.client.get(
            'expense/expensereport/v2.0/Reports' % options,
))
@_syntax(options)
def do_get_report_by_id(self, namespace):
'''Get report_by_id''' # TODO
options = validate_report_by_id(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v2.0/report/{report id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/expense-report-resource/expense-report-resource-post
@_syntax(options)
def do_post_Exceptions(self, namespace):
'''Post Exceptions''' # TODO
options = validate_Exceptions(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/Exceptions' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_submit(self, namespace):
'''Post submit''' # TODO
options = validate_submit(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/submit' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_workflowaction(self, namespace):
'''Post workflowaction''' # TODO
options = validate_workflowaction(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.1/report/{report id}/workflowaction' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/integration-status-resource/integration-status-resourc
@_syntax(options)
def do_post_report_by_id(self, namespace):
'''Post report_by_id''' # TODO
options = validate_report_by_id(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v2.0/integrationstatus/report/{report id}' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/web-services/expense-report/location-resource/location-resource-get
@_syntax(options)
def do_get_expensereport_by_id_1(self, namespace):
'''Get expensereport_by_id_1''' # TODO
options = validate_expensereport_by_id_1(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.1/{expensereport id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/extract/extract-definition-resource/extract-definition-resource-get
@_syntax(options)
def do_get_v10(self, namespace):
'''Get v1.0''' # TODO
options = validate_v10(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0' % options,
))
@_syntax(options)
def do_get_extract_by_id(self, namespace):
'''Get extract_by_id''' # TODO
options = validate_extract_by_id(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0/{extract id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/extract/extract-file-resource/extract-file-resource-get
@_syntax(options)
def do_get_file(self, namespace):
'''Get file''' # TODO
options = validate_file(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0/{extract id}/job/{job id}/file' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/extract/extract-job-resource/extract-job-resource-get
@_syntax(options)
def do_get_job(self, namespace):
'''Get job''' # TODO
options = validate_job(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0/{extract id}/job' % options,
))
@_syntax(options)
def do_get_job_by_id(self, namespace):
'''Get job_by_id''' # TODO
options = validate_job_by_id(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0/{extract id}/job/{job id}' % options,
))
@_syntax(options)
def do_get_status(self, namespace):
'''Get status''' # TODO
options = validate_status(namespace.options)
_pprint(self.client.get(
'expense/extract/v1.0/{extract id}/job/{job id}/status' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/extract/extract-job-resource/extract-job-resource-post
@_syntax(options)
def do_post_job(self, namespace):
'''Post job''' # TODO
options = validate_job(namespace.options)
_pprint(self.client.post(
'expense/extract/v1.0/{extract id}/job' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/web-services/imaging/image-resource/image-resource-post
@_syntax(options)
def do_post_receipt(self, namespace):
'''Post receipt''' # TODO
options = validate_receipt(namespace.options)
_pprint(self.client.post(
'image/v1.0/receipt' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_expenseentry_by_id(self, namespace):
'''Post expenseentry_by_id''' # TODO
options = validate_expenseentry_by_id(namespace.options)
_pprint(self.client.post(
'image/v1.0/expenseentry/{expenseentry id}' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_invoice_by_id(self, namespace):
'''Post invoice_by_id''' # TODO
options = validate_invoice_by_id(namespace.options)
_pprint(self.client.post(
'image/v1.1/invoice/{invoice id}' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_report_by_id_1(self, namespace):
'''Post report_by_id_1''' # TODO
options = validate_report_by_id_1(namespace.options)
_pprint(self.client.post(
'image/v1.0/report/{report id}' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
# See also: https://developer.concur.com/api-documentation/web-services/imaging/image-url-resource/image-url-resource-get
@_syntax(options)
def do_get_receipt_by_id(self, namespace):
'''Get receipt_by_id''' # TODO
options = validate_receipt_by_id(namespace.options)
_pprint(self.client.get(
'image/v1.0/receipt/{receipt id}' % options,
))
@_syntax(options)
def do_get_report_by_id_1(self, namespace):
'''Get report_by_id_1''' # TODO
options = validate_report_by_id_1(namespace.options)
_pprint(self.client.get(
'image/v1.0/report/{report id}' % options,
))
@_syntax(options)
def do_get_expenseentry_by_id(self, namespace):
'''Get expenseentry_by_id''' # TODO
options = validate_expenseentry_by_id(namespace.options)
_pprint(self.client.get(
'image/v1.0/expenseentry/{expenseentry id}' % options,
))
@_syntax(options)
def do_get_invoice_by_id(self, namespace):
'''Get invoice_by_id''' # TODO
options = validate_invoice_by_id(namespace.options)
_pprint(self.client.get(
'image/v1.0/invoice/{invoice id}' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/quick-expense/quick-expense-resource/quick-expense-resource-get
@_syntax(options)
def do_get_quickexpense(self, namespace):
'''Get quickexpense''' # TODO
options = validate_quickexpense(namespace.options)
_pprint(self.client.get(
'expense/expensereport/v1.0/quickexpense' % options,
))
# See also: https://developer.concur.com/api-documentation/web-services/quick-expense/quick-expense-resource/quick-expense-resource-post
@_syntax(options)
def do_post_quickexpense(self, namespace):
'''Post quickexpense''' # TODO
options = validate_quickexpense(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.0/quickexpense' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
@_syntax(options)
def do_post_quickexpense_1(self, namespace):
'''Post quickexpense_1''' # TODO
options = validate_quickexpense_1(namespace.options)
_pprint(self.client.post(
'expense/expensereport/v1.0/quickexpense' % options,
RootTag=options, # TODO
_xmlns='http://www.concursolutions.com/api/expense/expensereport/2011/03', # TODO
))
def main(argv=None):
if argv is None:
import sys
argv = sys.argv[1:]
ConcurCmd().cmdloop()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1710544
|
<gh_stars>1-10
# Generated by Django 3.0.8 on 2020-08-02 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('person', '0014_auto_20200728_2104'),
]
operations = [
migrations.AddField(
model_name='person',
name='resume',
field=models.FileField(blank=True, null=True, upload_to='resume/'),
),
]
|
StarcoderdataPython
|
1808654
|
## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
# maxpool layer
max_k = 2
max_s = 2
# convolutional layer 1
W_1 = 224 # length of one side of image; per resized shape from notebook 1
F_1 = 5 # length of one side of kernel
S_1 = 1 # default stride
Out_feat_1 = 32
Out_dim_1 = (W_1 - F_1) / (S_1) + 1
# print('Output size before max pool:\t(', Out_feat_1, ', ', Out_dim_1, ', ', Out_dim_1, ')')
# print('Output size after max pool:\t(', Out_feat_1, ', ', (Out_dim_1 / max_s), ', ', (Out_dim_1 / max_s), ')')
# convolutional layer 2
W_2 = Out_dim_1 / max_s
F_2 = 3 # length of one side of kernel
S_2 = 1 # default stride
Out_feat_2 = 128
Out_dim_2 = (W_2 - F_2) / (S_2) + 1
# print('Output size before max pool:\t(', Out_feat_2, ', ', Out_dim_2, ', ', Out_dim_2, ')')
# print('Output size after max pool:\t(', Out_feat_2, ', ', (Out_dim_2 / max_s), ', ', (Out_dim_2 / max_s), ')')
# convolutional layer 3
W_3 = Out_dim_2 / max_s
F_3 = 4 # length of one side of kernel
S_3 = 1 # default stride
Out_feat_3 = 512
Out_dim_3 = (W_3 - F_3) / (S_3) + 1
# print('Output size before max pool:\t(', Out_feat_2, ', ', Out_dim_2, ', ', Out_dim_2, ')')
# print('Output size after max pool:\t(', Out_feat_2, ', ', (Out_dim_2 / max_s), ', ', (Out_dim_2 / max_s), ')')
# convolutional layer 4
W_4 = Out_dim_3 / max_s
F_4 = 5 # length of one side of kernel
S_4 = 1 # default stride
Out_feat_4 = 2056
Out_dim_4 = (W_4 - F_4) / (S_4) + 1
# print('Output size before max pool:\t(', Out_feat_2, ', ', Out_dim_2, ', ', Out_dim_2, ')')
# print('Output size after max pool:\t(', Out_feat_2, ', ', (Out_dim_2 / max_s), ', ', (Out_dim_2 / max_s), ')')
# convolutional layer 4
W_5 = Out_dim_4 / max_s
F_5 = 3 # length of one side of kernel
S_5 = 1 # default stride
Out_feat_5 = 4112
Out_dim_5 = (W_5 - F_5) / (S_5) + 1
# print('Output size before max pool:\t(', Out_feat_2, ', ', Out_dim_2, ', ', Out_dim_2, ')')
# print('Output size after max pool:\t(', Out_feat_2, ', ', (Out_dim_2 / max_s), ', ', (Out_dim_2 / max_s), ')')
# fully connected layer 1
length = Out_dim_5 / max_s
# Round down to the nearest integer
if np.round(length, decimals=0) > length:
length = np.round(length, 0) - 1
else:
length = np.round(length, 0)
# print('Input shape for first fully connected layer:\t', Out_feat_2, ' * ', length, ' * ', length)
fc_1_in = Out_feat_5 * length * length
fc_1_out = 2000
# fully connected layer 2
fc_2_in = fc_1_out
fc_2_out = 3000
# fully connected layer 3
fc_3_in = fc_2_out
fc_3_out = 68*2 # number of features required for prediction
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
# self.conv1 = nn.Conv2d(1, 32, 5)
## Note that among the layers to add, consider including:
# maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
self.conv1 = nn.Conv2d(1, Out_feat_1, F_1)
self.conv2 = nn.Conv2d(Out_feat_1, Out_feat_2, F_2)
self.conv3 = nn.Conv2d(Out_feat_2, Out_feat_3, F_3)
self.conv4 = nn.Conv2d(Out_feat_3, Out_feat_4, F_4)
self.conv5 = nn.Conv2d(Out_feat_4, Out_feat_5, F_5)
self.pool = nn.MaxPool2d(max_k, max_s)
self.fc1 = nn.Linear(int(fc_1_in), int(fc_1_out))
self.fc2 = nn.Linear(int(fc_2_in), int(fc_2_out))
self.fc3 = nn.Linear(int(fc_3_in), int(fc_3_out))
self.fc1_drop = nn.Dropout(p=0.4)
self.fc2_drop = nn.Dropout(p=0.2)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
## x = self.pool(F.relu(self.conv1(x)))
        # five conv layers, each followed by ReLU activation and max pooling
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
x = self.pool(F.relu(self.conv5(x)))
# flatten
x = x.view(x.size(0), -1)
        # three fully connected layers, with dropout after the first two
x = F.relu(self.fc1(x))
x = self.fc1_drop(x)
x = F.relu(self.fc2(x))
x = self.fc2_drop(x)
x = self.fc3(x)
# a modified x, having gone through all the layers of your model, should be returned
return x
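# Quick shape check (illustrative sketch, not part of the original file; assumes a
# single-channel 224x224 input, matching W_1 above):
#   net = Net()
#   sample = torch.randn(1, 1, 224, 224)
#   out = net(sample)   # expected shape: (1, 136), i.e. 68 (x, y) keypoint pairs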
|
StarcoderdataPython
|
12847234
|
<reponame>quanganh1997polytechnique/Project-DL-Seq2Seq<gh_stars>0
"""
** deeplean-ai.com **
created by :: GauravBh1010tt
contact :: <EMAIL>
"""
from __future__ import unicode_literals, print_function, division
import math
import re
import os
import numpy as np
import torch
import random
import warnings
from io import open
import unicodedata
import matplotlib.pyplot as plt
from torch.autograd import Variable
import time
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %02ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
warnings.simplefilter('ignore')
plt.rcParams['figure.figsize'] = (8, 8)
np.random.seed(42)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
use_cuda = torch.cuda.is_available()
import zipfile
zip_ref = zipfile.ZipFile('data.zip', 'r')
zip_ref.extractall()
zip_ref.close()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# torch.cuda.set_device(1)
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p,reverse):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[reverse].startswith(eng_prefixes)
def filterPairs(pairs, reverse):
if reverse:
reverse = 1
else:
reverse = 0
return [pair for pair in pairs if filterPair(pair,reverse)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs,reverse)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair, input_lang, output_lang):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def indexes_from_sentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def variable_from_sentence(lang, sentence):
indexes = indexes_from_sentence(lang, sentence)
indexes.append(EOS_token)
var = Variable(torch.LongTensor(indexes).view(-1, 1))
# print('var =', var)
if use_cuda: var = var.cuda()
return var
def variables_from_pair(pair, input_lang, output_lang):
input_variable = variable_from_sentence(input_lang, pair[0])
target_variable = variable_from_sentence(output_lang, pair[1])
return (input_variable, target_variable)
def save_checkpoint(epoch, model, optimizer, directory, \
filename='best.pt'):
checkpoint=({'epoch': epoch+1,
'model': model.state_dict(),
'optimizer' : optimizer.state_dict()
})
try:
torch.save(checkpoint, os.path.join(directory, filename))
except:
os.mkdir(directory)
torch.save(checkpoint, os.path.join(directory, filename))
|
StarcoderdataPython
|
1702195
|
<filename>scraper/scrape.py
from datetime import datetime
from json import loads, dumps
from os import path, makedirs
from threading import Thread
from time import sleep
from urllib2 import quote
from tweepy.api import API
from tweepy.auth import OAuthHandler
from tweepy.cursor import Cursor
from tweepy.error import TweepError
cfgJson = open('keys.cfg').read()
cfgDict = loads(cfgJson)
consumerKeys = cfgDict['consumer_keys']
consumerSecrets = cfgDict['consumer_secrets']
accessTokens = cfgDict['access_tokens']
accessSecrets = cfgDict['access_secrets']
langs = cfgDict['langs']
queries = cfgDict['queries']
numDays = cfgDict['days'] # Number of days (until today)
perDay = cfgDict['per_day'] # Total tweets per day
outDir = cfgDict['out_dir']
currentTime = datetime.now()
year = str(currentTime.year)
month = str(currentTime.month)
day = int(currentTime.day)
sinces = []
untils = []
if not path.exists(outDir):
makedirs(outDir)
for i in range(day - numDays, day):
sinces.append(year + '-' + month + '-' + str(i))
untils.append(year + '-' + month + '-' + str(i + 1))
datePath = path.join(outDir, sinces[len(sinces) - 1])
if not path.exists(datePath):
makedirs(datePath)
for lang in langs:
dateLangPath = path.join(datePath, lang)
if not path.exists(dateLangPath):
makedirs(dateLangPath)
def scrapeThread(index):
auth = OAuthHandler(consumerKeys[index], consumerSecrets[index])
auth.set_access_token(accessTokens[index], accessSecrets[index])
api = API(auth)
try:
api.verify_credentials()
except TweepError:
print "Failed to authenticate - most likely reached rate limit/incorrect credentials!"
return
else:
print "You have successfully logged on as: " + api.me().screen_name
for i in range(0, numDays):
for query in queries[index]:
count = 0
cursor = Cursor(api.search,
q=quote(query.encode('utf-8')),
lang=langs[index],
since=sinces[i],
until=untils[i],
include_entities=True).items()
while True:
try:
tweet = cursor.next()
utc = datetime.now().strftime('%Y%m%dT%H%M%S%f')
outPath = path.join(outDir, sinces[i], langs[index], utc + '.json')
with open(outPath, 'w') as output:
output.write(dumps(tweet._json, ensure_ascii=False).encode('utf8'))
count += 1
if count == int(perDay / len(queries[index])):
break
except TweepError:
print langs[index] + " - rate limit reached! Pausing thread for 15 minutes."
sleep(60 * 15)
continue
except StopIteration:
break
print str(count) + " tweets stored in " + outPath
threads = []
threadCount = len(consumerKeys)
for index in range(0, threadCount):
thread = Thread(target=scrapeThread, args=[index])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
|
StarcoderdataPython
|
9718705
|
<reponame>xn-twist/squat-monitor
from django.apps import AppConfig
class TwisterConfig(AppConfig):
name = 'twister'
|
StarcoderdataPython
|
1806497
|
<reponame>yangheng111/AnnotatedNetworkModelGit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as tvF
import os
import numpy as np
# from math import log10
from datetime import datetime
# import OpenEXR
# import pyopenexrates
# from PIL import Image
# import Imath
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def clear_line():
"""Clears line from any characters."""
print('\r{}'.format(' ' * 80), end='\r')
def progress_bar(batch_idx, num_batches, report_interval, train_loss):
"""Neat progress bar to track training."""
dec = int(np.ceil(np.log10(num_batches)))
bar_size = 21 + dec
progress = (batch_idx % report_interval) / report_interval
fill = int(progress * bar_size) + 1
print('\rBatch {:>{dec}d} [{}{}] Train loss: {:>1.5f}'.format(batch_idx + 1, '=' * fill + '>', ' ' * (bar_size - fill), train_loss, dec=str(dec)), end='')
def time_elapsed_since(start):
"""Computes elapsed time since start."""
timedelta = datetime.now() - start
string = str(timedelta)[:-7]
ms = int(timedelta.total_seconds() * 1000)
return string, ms
def show_on_epoch_end(epoch_time, valid_time, valid_loss, valid_psnr):
"""Formats validation error stats."""
clear_line()
print('Train time: {} | Valid time: {} | Valid loss: {:>1.5f} | Avg PSNR: {:.2f} dB'.format(epoch_time, valid_time, valid_loss, valid_psnr))
def show_on_report(batch_idx, num_batches, loss, elapsed):
"""Formats training stats."""
clear_line()
dec = int(np.ceil(np.log10(num_batches)))
print('Batch {:>{dec}d} / {:d} | Avg loss: {:>1.5f} | Avg train time / batch: {:d} ms'.format(batch_idx + 1, num_batches, loss, int(elapsed), dec=dec))
def plot_per_epoch(ckpt_dir, title, measurements, y_label):
"""Plots stats (train/valid loss, avg PSNR, etc.)."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(1, len(measurements) + 1), measurements)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('Epoch')
ax.set_ylabel(y_label)
ax.set_title(title)
plt.tight_layout()
fname = '{}.png'.format(title.replace(' ', '-').lower())
plot_fname = os.path.join(ckpt_dir, fname)
plt.savefig(plot_fname, dpi=200)
plt.close()
# def load_hdr_as_tensor(img_path):
# """Converts OpenEXR image to torch float tensor."""
#
# # Read OpenEXR file
# if not OpenEXR.isOpenExrFile(img_path):
# raise ValueError('Image {img_path} is not a valid OpenEXR file')
# src = OpenEXR.InputFile(img_path)
# pixel_type = Imath.PixelType(Imath.PixelType.FLOAT)
# dw = src.header()['dataWindow']
# size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
#
# # Read into tensor
# tensor = torch.zeros((3, size[1], size[0]))
# for i, c in enumerate('RGB'):
# rgb32f = np.fromstring(src.channel(c, pixel_type), dtype=np.float32)
# tensor[i, :, :] = torch.from_numpy(rgb32f.reshape(size[1], size[0]))
#
# return tensor
def reinhard_tonemap(tensor):
"""Reinhard et al. (2002) tone mapping."""
tensor[tensor < 0] = 0
return torch.pow(tensor / (1 + tensor), 1 / 2.2)
def psnr(input, target):
"""Computes peak signal-to-noise ratio."""
return 10 * torch.log10(1 / F.mse_loss(input, target))
def create_montage(img_name, noise_type, save_path, source_t, denoised_t, clean_t, show):
"""Creates montage for easy comparison."""
fig, ax = plt.subplots(1, 3, figsize=(9, 3))
fig.canvas.set_window_title(img_name.capitalize()[:-4])
# Bring tensors to CPU
source_t = source_t.cpu().narrow(0, 0, 3)
denoised_t = denoised_t.cpu()
clean_t = clean_t.cpu()
source = tvF.to_pil_image(source_t)
denoised = tvF.to_pil_image(torch.clamp(denoised_t, 0, 1))
clean = tvF.to_pil_image(clean_t)
# Build image montage
psnr_vals = [psnr(source_t, clean_t), psnr(denoised_t, clean_t)]
titles = ['Input: {:.2f} dB'.format(psnr_vals[0]),
'Denoised: {:.2f} dB'.format(psnr_vals[1]),
'Ground truth']
zipped = zip(titles, [source, denoised, clean])
for j, (title, img) in enumerate(zipped):
ax[j].imshow(img)
ax[j].set_title(title)
ax[j].axis('off')
# Open pop up window, if requested
if show > 0:
plt.show()
# Save to files
fname = os.path.splitext(img_name)[0]
    source.save(os.path.join(save_path, f'{fname}-{noise_type}-noisy.png'))
    denoised.save(os.path.join(save_path, f'{fname}-{noise_type}-denoised.png'))
    fig.savefig(os.path.join(save_path, f'{fname}-{noise_type}-montage.png'), bbox_inches='tight')
class AvgMeter(object):
"""Computes and stores the average and current value.
Useful for tracking averages such as elapsed times, minibatch losses, etc.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0.
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
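# Example usage (illustrative sketch, not part of the original file):
#   meter = AvgMeter()
#   meter.update(0.5, n=8)   # e.g. a minibatch loss averaged over 8 samples
#   meter.update(0.3, n=8)
#   print(meter.avg)         # running average over all 16 samples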
|
StarcoderdataPython
|
3323323
|
<reponame>rzsaglam/project-env<filename>projectenv/main/migrations/0010_alter_paint_table.py
# Generated by Django 3.2.3 on 2021-05-31 07:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0009_alter_paint_table'),
]
operations = [
migrations.AlterModelTable(
name='paint',
table='paint',
),
]
|
StarcoderdataPython
|
1920540
|
<reponame>sadmanbd/social-lead-generator
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from . import database, models, routers
models.Base.metadata.create_all(bind=database.engine)
app = FastAPI()
origins = os.getenv("ALLOWED_ORIGINS", "").split(",")
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(
routers.healthcheck.router,
prefix="/api/v1/healthcheck",
tags=["healthcheck"]
)
app.include_router(
routers.users.router,
prefix="/api/v1/users",
tags=["users"]
)
app.include_router(
routers.auth.router,
prefix="/api/v1/auth",
tags=["auth"]
)
app.include_router(
routers.twitter.router,
prefix="/api/v1/twitter",
tags=["twitter"]
)
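# Example local run (illustrative sketch, not part of the original module; the
# module path "app.main" is an assumption about this package's layout):
#   uvicorn app.main:app --reload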
|
StarcoderdataPython
|
1616384
|
import tensorflow as tf
from tensorflow.keras.layers import Attention
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, embed_dim=512, num_heads=8, dropout_rate=0.1, causal=False):
super(MultiHeadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout_rate = dropout_rate
self.causal = causal
assert embed_dim % num_heads == 0
head_dim = embed_dim // num_heads
self.w_query = tf.keras.layers.Dense(embed_dim)
self.w_value = tf.keras.layers.Dense(embed_dim)
self.w_key = tf.keras.layers.Dense(embed_dim)
self.attention = Attention(causal=causal, dropout=dropout_rate)
self.w_projection = tf.keras.layers.Dense(embed_dim)
self.reshape_split = tf.keras.layers.Reshape((-1, num_heads, head_dim))
self.permute = tf.keras.layers.Permute((2, 1, 3))
self.reshape_split_mask = tf.keras.layers.Reshape((-1, 1))
self.permute_mask = tf.keras.layers.Permute((2, 1))
self.reshape_merge = tf.keras.layers.Reshape((-1, embed_dim))
def call(self, inputs, mask=None, training=None):
q = inputs[0] # [batch_size, tq, embed_dim]
v = inputs[1] # [batch_size, tv, embed_dim]
k = inputs[2] if len(inputs) > 2 else v # [batch_size, tv, embed_dim]
query = self.w_query(q) # [batch_size, tq, embed_dim]
query = self.separate_heads(query) # [batch_size, num_heads, tq, head_dim]
value = self.w_value(v) # [batch_size, tv, embed_dim]
value = self.separate_heads(value) # [batch_size, num_heads, tv, head_dim]
key = self.w_key(k) # [batch_size, tv, embed_dim]
key = self.separate_heads(key) # [batch_size, num_heads, tv, head_dim]
if mask is not None:
mask = self.separate_heads_mask(mask)
attention = self.attention([query, value, key], mask=mask) # [batch_size, num_heads, tq, head_dim]
attention = self.merge_heads(attention) # [batch_size, tq, embed_dim]
x = self.w_projection(attention) # [batch_size, tq, embed_dim]
return x
def separate_heads(self, x):
x = self.reshape_split(x) # [batch_size, t, num_heads, head_dim]
x = self.permute(x) # [batch_size, num_heads, t, head_dim]
return x
def separate_heads_mask(self, mask):
query_mask = mask[0] # [batch_size, tq]
value_mask = mask[1] # [batch_size, tv]
if query_mask is not None:
query_mask = self.reshape_split_mask(query_mask) # [batch_size, tq, num_heads]
query_mask = self.permute_mask(query_mask) # [batch_size, num_heads, tq]
if value_mask is not None:
value_mask = self.reshape_split_mask(value_mask) # [batch_size, tv, num_heads]
value_mask = self.permute_mask(value_mask) # [batch_size, num_heads, tv]
return [query_mask, value_mask]
def merge_heads(self, x):
x = self.permute(x) # [batch_size, t, num_heads, head_dim]
x = self.reshape_merge(x)
return x
def compute_mask(self, inputs, mask=None):
if mask:
q_mask = mask[0]
if q_mask is None:
return None
return tf.convert_to_tensor(q_mask)
return None
def get_config(self):
config = {"embed_dim": self.embed_dim, "num_heads": self.num_heads,
"dropout_rate": self.dropout_rate, "causal": self.causal}
base_config = super(MultiHeadAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
tf.keras.utils.get_custom_objects().update({
"MultiHeadAttention": MultiHeadAttention
})
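# Example usage (illustrative sketch, not part of the original module; shapes are
# assumptions chosen for demonstration):
#   mha = MultiHeadAttention(embed_dim=512, num_heads=8)
#   q = tf.random.normal((2, 10, 512))   # queries: batch of 2, length 10
#   v = tf.random.normal((2, 16, 512))   # values/keys: batch of 2, length 16
#   out = mha([q, v])                    # -> shape (2, 10, 512)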
|
StarcoderdataPython
|
8048835
|
<gh_stars>0
# coding: utf-8
# adapters/repository.py
import abc
from domain import model
from sqlalchemy.orm import Session
from typing import List
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add(self, model: object):
raise NotImplementedError
@abc.abstractmethod
def get(self, id: int) -> object:
raise NotImplementedError
class UserRepository(AbstractRepository):
def __init__(self, session: Session):
self._session = session
def add(self, user: model.User):
self._session.add(user)
def get(self, id: int) -> model.User:
return self._session.query(model.User).get(id)
def allUser(self) -> List[model.User]:
return self._session.query(model.User).all()
class FakeUserRepository(AbstractRepository):
def __init__(self):
self._session = list()
def add(self, user: model.User):
self._session.append(user)
def get(self, username: str) -> model.User:
try:
return next(user for user in self._session if user.username == username)
except StopIteration:
return None
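# Example usage (illustrative sketch, not part of the original module; assumes
# model.User can be constructed with a `username` keyword, which is an assumption
# about the domain model):
#   repo = FakeUserRepository()
#   repo.add(model.User(username="alice"))
#   user = repo.get("alice")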
|
StarcoderdataPython
|
11252065
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
import django
try:
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
except ImportError: # Django < 1.9
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
import uuid
from .signals import email_event
def _new_uuid():
"""Initialisation function for reference UUID."""
return str(uuid.uuid4())
class Email(models.Model):
created = models.DateTimeField(_('created'), auto_now_add=True)
updated = models.DateTimeField(_('updated'), auto_now=True)
content_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey('content_type', 'object_id')
email = models.CharField(_('addressee'), max_length=512)
event = models.CharField(_('event type'), max_length=32)
reason = models.CharField(_('reason'), max_length=1024, default='')
timestamp = models.DateTimeField(_('timestamp'))
uuid = models.CharField(_('reference UUID'), max_length=64, default=_new_uuid, db_index=True)
def __unicode__(self):
return '%s: %s' % (self.email, self.event)
class Meta:
if django.VERSION[0] == 1 and django.VERSION[1] > 4:
index_together = [
['content_type', 'object_id'],
]
def save(self, *args, **kwargs):
# check if we're just creating the object
if not self.pk:
creation = True
else:
creation = False
# then actually save it
super(Email, self).save(*args, **kwargs)
# and send out a proper signal with an instance of the saved model
if creation:
email_event.send(self)
|
StarcoderdataPython
|
8078544
|
<filename>python/picture_logic.py
from cv2 import cv2
def take_picture():
camera = cv2.VideoCapture(0)
saved_image_name = 'trash_object.jpg'
print(saved_image_name)
while True:
return_value, raw_image = camera.read()
display_image = raw_image
font = cv2.FONT_HERSHEY_SIMPLEX
bottom_left_corner_of_text = (350, 40)
font_scale = 1
font_color = (255, 255, 255)
line_type = 2
cv2.putText(display_image, 'Press Spacebar to take a picture',
bottom_left_corner_of_text,
font,
font_scale,
font_color,
line_type)
cv2.imshow('image', display_image)
if cv2.waitKey(1) & 0xFF == 32:
cv2.imwrite(saved_image_name, raw_image)
break
camera.release()
cv2.destroyAllWindows()
return saved_image_name
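# Example usage (illustrative sketch, not part of the original module):
#   filename = take_picture()   # opens the default webcam; press Spacebar to capture
#   print(filename)             # path of the saved image, i.e. 'trash_object.jpg'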
|
StarcoderdataPython
|
9653965
|
#! /usr/bin/env python3
import logging
import json
import os, sys, tempfile
import copy
import glob
import subprocess
import shutil
from jsonmerge import merge
from cwltool.executors import SingleJobExecutor
from cwltool.stdfsaccess import StdFsAccess
from cwltool.workflow import expression
from cwltool.context import RuntimeContext, getdefault
from cwltool.pathmapper import visit_class
from cwltool.mutation import MutationManager
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from .cwlutils import flatten, shortname, load_cwl
from cwl_airflow.utils.notifier import task_on_success, task_on_failure, task_on_retry, post_status
from airflow.utils.log.logging_mixin import StreamLogWriter
_logger = logging.getLogger(__name__)
class StreamLogWriterUpdated (StreamLogWriter):
def fileno(self):
return -1
class CWLStepOperator(BaseOperator):
ui_color = '#3E53B7'
ui_fgcolor = '#FFF'
@apply_defaults
def __init__(
self,
task_id=None,
reader_task_id=None,
ui_color=None,
*args, **kwargs):
self.outdir = None
self.reader_task_id = None
self.cwlwf = None
self.cwl_step = None
kwargs.update({"on_success_callback": kwargs.get("on_success_callback", task_on_success),
"on_failure_callback": kwargs.get("on_failure_callback", task_on_failure),
"on_retry_callback": kwargs.get("on_retry_callback", task_on_retry)})
super(self.__class__, self).__init__(task_id=task_id, *args, **kwargs)
self.reader_task_id = reader_task_id if reader_task_id else self.reader_task_id
if ui_color:
self.ui_color = ui_color
def execute(self, context):
post_status(context)
self.cwlwf, it_is_workflow = load_cwl(self.dag.default_args["cwl_workflow"], self.dag.default_args)
self.cwl_step = [step for step in self.cwlwf.steps if self.task_id == step.id.split("#")[-1]][0] if it_is_workflow else self.cwlwf
_logger.info('{0}: Running!'.format(self.task_id))
upstream_task_ids = [t.task_id for t in self.upstream_list] + \
([self.reader_task_id] if self.reader_task_id else [])
_logger.debug('{0}: Collecting outputs from: \n{1}'.format(self.task_id,
json.dumps(upstream_task_ids, indent=4)))
upstream_data = self.xcom_pull(context=context, task_ids=upstream_task_ids)
_logger.info('{0}: Upstream data: \n {1}'.format(self.task_id,
json.dumps(upstream_data, indent=4)))
promises = {}
for data in upstream_data: # upstream_data is an array with { promises and outdir }
promises = merge(promises, data["promises"])
if "outdir" in data:
self.outdir = data["outdir"]
_d_args = self.dag.default_args
if not self.outdir:
self.outdir = _d_args['tmp_folder']
_logger.debug('{0}: Step inputs: {1}'.format(self.task_id,
json.dumps(self.cwl_step.tool["inputs"], indent=4)))
_logger.debug('{0}: Step outputs: {1}'.format(self.task_id,
json.dumps(self.cwl_step.tool["outputs"], indent=4)))
jobobj = {}
for inp in self.cwl_step.tool["inputs"]:
jobobj_id = shortname(inp["id"]).split("/")[-1]
source_ids = []
promises_outputs = []
try:
source_field = inp["source"] if it_is_workflow else inp.get("id")
source_ids = [shortname(s) for s in source_field] if isinstance(source_field, list) else [shortname(source_field)]
promises_outputs = [promises[source_id] for source_id in source_ids if source_id in promises]
except:
_logger.warning("{0}: Couldn't find source field in step input: {1}"
.format(self.task_id,
json.dumps(inp, indent=4)))
_logger.info('{0}: For input {1} with source_ids: {2} found upstream outputs: \n{3}'
.format(self.task_id,
jobobj_id,
source_ids,
promises_outputs))
if len(promises_outputs) > 1:
if inp.get("linkMerge", "merge_nested") == "merge_flattened":
jobobj[jobobj_id] = flatten(promises_outputs)
else:
jobobj[jobobj_id] = promises_outputs
# Should also check if [None], because in this case we need to take default value
elif len(promises_outputs) == 1 and (promises_outputs[0] is not None):
jobobj[jobobj_id] = promises_outputs[0]
elif "valueFrom" in inp:
jobobj[jobobj_id] = None
elif "default" in inp:
d = copy.copy(inp["default"])
jobobj[jobobj_id] = d
else:
continue
_logger.debug('{0}: Collected job object: \n {1}'.format(self.task_id, json.dumps(jobobj, indent=4)))
def _post_scatter_eval(shortio, cwl_step):
_value_from = {
shortname(i["id"]).split("/")[-1]:
i["valueFrom"] for i in cwl_step.tool["inputs"] if "valueFrom" in i
}
_logger.debug(
'{0}: Step inputs with valueFrom: \n{1}'.format(self.task_id, json.dumps(_value_from, indent=4)))
def value_from_func(k, v):
if k in _value_from:
return expression.do_eval(
_value_from[k], shortio,
self.cwlwf.tool.get("requirements", []),
None, None, {}, context=v)
else:
return v
return {k: value_from_func(k, v) for k, v in shortio.items()}
job = _post_scatter_eval(jobobj, self.cwl_step)
_logger.info('{0}: Final job data: \n {1}'.format(self.task_id, json.dumps(job, indent=4)))
_d_args['outdir'] = tempfile.mkdtemp(prefix=os.path.join(self.outdir, "step_tmp"))
_d_args['tmpdir_prefix'] = os.path.join(_d_args['outdir'], 'cwl_tmp_')
_d_args['tmp_outdir_prefix'] = os.path.join(_d_args['outdir'], 'cwl_outdir_')
_d_args["record_container_id"] = True
_d_args["cidfile_dir"] = _d_args['outdir']
_d_args["cidfile_prefix"] = self.task_id
_logger.debug(
'{0}: Runtime context: \n {1}'.format(self, _d_args))
executor = SingleJobExecutor()
runtimeContext = RuntimeContext(_d_args)
runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
for inp in self.cwl_step.tool["inputs"]:
if inp.get("not_connected"):
del job[shortname(inp["id"].split("/")[-1])]
_stderr = sys.stderr
sys.stderr = sys.__stderr__
(output, status) = executor(self.cwl_step.embedded_tool if it_is_workflow else self.cwl_step,
job,
runtimeContext,
logger=_logger)
sys.stderr = _stderr
if not output and status == "permanentFail":
            raise ValueError("{0}: step failed with permanentFail status and no output".format(self.task_id))
_logger.debug(
'{0}: Embedded tool outputs: \n {1}'.format(self.task_id, json.dumps(output, indent=4)))
promises = {}
for out in self.cwl_step.tool["outputs"]:
out_id = shortname(out["id"])
jobout_id = out_id.split("/")[-1]
try:
promises[out_id] = output[jobout_id]
            except (KeyError, TypeError):
continue
# Unsetting the Generation from final output object
visit_class(promises, ("File",), MutationManager().unset_generation)
data = {"promises": promises, "outdir": self.outdir}
_logger.info(
'{0}: Output: \n {1}'.format(self.task_id, json.dumps(data, indent=4)))
return data
def on_kill(self):
_logger.info("Stop docker containers")
for cidfile in glob.glob(os.path.join(self.dag.default_args["cidfile_dir"], self.task_id + "*.cid")): # make this better, doesn't look good to read from self.dag.default_args
try:
with open(cidfile, "r") as inp_stream:
_logger.debug(f"""Read container id from {cidfile}""")
command = ["docker", "kill", inp_stream.read()]
_logger.debug(f"""Call {" ".join(command)}""")
p = subprocess.Popen(command, shell=False)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except Exception as ex:
_logger.error(f"""Failed to stop docker container with ID from {cidfile}\n {ex}""")
# _logger.info(f"""Delete temporary output directory {self.outdir}""")
# try:
# shutil.rmtree(self.outdir)
# except Exception as ex:
# _logger.error(f"""Failed to delete temporary output directory {self.outdir}\n {ex}""")
|
StarcoderdataPython
|
134282
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Module for log_prob transformation."""
from jax import core as jax_core
from jax import random
from jax import tree_util
import jax.numpy as np
from oryx.core import trace_util
from oryx.core.interpreters import inverse
from oryx.core.interpreters import propagate
__all__ = [
'LogProbRules',
'log_prob'
]
safe_map = jax_core.safe_map
InverseAndILDJ = inverse.core.InverseAndILDJ
ildj_registry = inverse.core.ildj_registry
class LogProbRules(dict):
"""Default dictionary for log_prob propagation rules.
By default, the rules for LogProb propagation are just the InverseAndILDJ
rules, but instead of raising a NotImplementedError, LogProb will silently
fail. This default dict-like class implements this behavior, but also allows
primitives to register custom propagation rules.
"""
def __missing__(self, prim):
self[prim] = rule = make_default_rule(prim)
return rule
log_prob_rules = LogProbRules()
# The log_prob_registry is used to compute log_prob values from samples after
# propagation is done.
log_prob_registry = {}
def log_prob(f):
"""LogProb function transformation."""
def wrapped(sample, *args, **kwargs):
"""Function wrapper that takes in log_prob arguments."""
# Trace the function using a random seed
dummy_seed = random.PRNGKey(0)
jaxpr, _ = trace_util.stage(f)(dummy_seed, *args, **kwargs)
flat_outargs, _ = tree_util.tree_flatten(sample)
flat_inargs, _ = tree_util.tree_flatten(args)
constcells = [InverseAndILDJ.new(val) for val in jaxpr.literals]
flat_incells = [
InverseAndILDJ.unknown(trace_util.get_shaped_aval(dummy_seed))
] + [InverseAndILDJ.new(val) for val in flat_inargs]
flat_outcells = [InverseAndILDJ.new(a) for a in flat_outargs]
# Re-use the InverseAndILDJ propagation but silently fail instead of
# erroring when we hit a primitive we can't invert.
env = propagate.propagate(InverseAndILDJ, log_prob_rules, jaxpr.jaxpr,
constcells, flat_incells, flat_outcells)
# Traverse the resulting environment, looking for primitives that have
# registered log_probs.
final_log_prob = _accumulate_log_probs(env)
return final_log_prob
return wrapped
def _accumulate_log_probs(env):
"""Recursively traverses Jaxprs to accumulate log_prob values."""
final_log_prob = 0.0
eqns = safe_map(propagate.Equation.from_jaxpr_eqn, env.jaxpr.eqns)
for eqn in eqns:
if eqn.primitive in log_prob_registry:
var, = eqn.outvars
if var not in env:
raise ValueError('Cannot compute log_prob of function.')
incells = [env.read(v) for v in eqn.invars]
outcells = [env.read(v) for v in eqn.outvars]
outcell, = outcells
if not outcell.top():
raise ValueError('Cannot compute log_prob of function.')
lp = log_prob_registry[eqn.primitive](
[cell if not cell.top() else cell.val for cell in incells],
outcell.val, **eqn.params
)
assert np.ndim(lp) == 0, 'log_prob must return a scalar.'
# Accumulate ILDJ term
final_log_prob += lp + np.sum(outcell.ildj)
for subenv in env.subenvs.values():
sub_lp = _accumulate_log_probs(subenv)
final_log_prob += sub_lp
return final_log_prob
def make_default_rule(prim):
"""Creates rule for prim without a registered log_prob."""
def rule(incells, outcells, **params):
"""Executes the inverse rule but fails if the inverse isn't implemented."""
try:
return ildj_registry[prim](incells, outcells, **params)
except NotImplementedError:
return incells, outcells, None
return rule
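# Registration sketch (added for illustration; `my_sample_p` and `norm_log_prob`
# are hypothetical names, not part of this module). A JAX primitive opts into the
# transformation by adding an entry to `log_prob_registry` that maps the flat
# input values and the output value to a scalar log-density, matching how
# `_accumulate_log_probs` invokes it:
#
#   def _my_sample_log_prob(invals, outval, **params):
#     loc, scale = invals[1:3]                  # layout depends on the primitive
#     return norm_log_prob(outval, loc, scale)  # must reduce to a scalar
#
#   log_prob_registry[my_sample_p] = _my_sample_log_prob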
|
StarcoderdataPython
|
11245336
|
# Python program to print all positive numbers in a list
# Note: two different approaches are shown, one for each list
# 1] list of numbers using a for loop
list1 = [12, -7, 5, 64,-14]
# iterating each number in list
for num in list1:
# checking condition
if num >= 0:
        print(num, end=" ")
#2] list of numbers
list2 = [12, 14, -95, 3]
num = 0
# using while loop
while(num < len(list2)):
# checking condition
if list2[num] >= 0:
print(list2[num], end = " ")
# increment num
num += 1
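#3] a more compact alternative (added as an illustrative sketch, not part of
# the original exercise): filter the non-negative values with a list comprehension
list3 = [12, -7, 5, 64, -14]
positives = [n for n in list3 if n >= 0]
print(*positives, sep=" ")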
|
StarcoderdataPython
|
11301790
|
<filename>cea/demand/metamodel/nn_generator/nn_presampled_caller.py
# coding=utf-8
"""
'nn_presampled_caller.py' collects presampled inputs and targets for neural-net training
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
import numpy as np
import pandas as pd
from cea.demand.metamodel.nn_generator.nn_random_sampler import input_dropout
def presampled_collector(locator,collect_count,config):
number_samples_scaler = config.neural_network.number_samples_scaler
nn_presample_path = locator.get_minmaxscaler_folder()
i=0
j=0
for i in range(number_samples_scaler):
#i = collect_count + i
file_path_inputs = os.path.join(nn_presample_path, "input%(i)s.csv" % locals())
file_path_targets = os.path.join(nn_presample_path, "target%(i)s.csv" % locals())
batch_input_matrix = np.asarray(pd.read_csv(file_path_inputs))
batch_taget_matrix = np.asarray(pd.read_csv(file_path_targets))
batch_input_matrix, batch_taget_matrix = input_dropout(batch_input_matrix, batch_taget_matrix)
if j < 1:
urban_input_matrix = batch_input_matrix
urban_taget_matrix = batch_taget_matrix
else:
urban_input_matrix = np.concatenate((urban_input_matrix, batch_input_matrix), axis=0)
urban_taget_matrix = np.concatenate((urban_taget_matrix, batch_taget_matrix), axis=0)
j=j+1
print(i)
collect_count=i+1
return urban_input_matrix, urban_taget_matrix, collect_count
|
StarcoderdataPython
|
5012999
|
import unidecode
import re
from collections import *
def _removeDiacritics(word):
return unidecode.unidecode(word)
def _removeDashes(word):
return re.sub(r"[^a-zA-Z]+", r"", word)
def normalize(word):
return _removeDashes(_removeDiacritics(word))
def computeFeed(words):
feed = list(map(lambda w: [w[1][0], w[0]], words.items()))
cumulativeFreq = 0.
for i in range(0, len(feed)):
cumulativeFreq += feed[i][0]
feed[i][0] = cumulativeFreq
for i in range(0, len(feed)):
feed[i][0] /= cumulativeFreq
return list(map(lambda w: (w[0], w[1]), feed))
def transformDictionary(dictName):
file = open(dictName, 'r')
lines = list(filter(lambda x: x != '', file.read().split('\n')))
file.close()
words = { }
for line in lines[1 : ]:
parameters = line.split('|')
word = parameters[0]
freq = float(parameters[-1])
if parameters[2] == 'LETTER':
continue
if parameters[2] != 'PONC':
wordRepr = normalize(word)
else:
wordRepr = '.'
if word not in words:
words[word] = [wordRepr, freq]
else:
words[word][1] += freq
file = open('new_lexique.csv', 'a')
for (w, l) in words.items():
file.write('{}|{}|{}\n'.format(w, l[0], l[1]))
file.close()
# return:
# - an ordered Dictionary (normalized word) => [frequency, [associated words]]
# - an ordered Dictionary (letter) => [frequency]
def loadDictionary2(dictName, userName):
file = open(dictName, 'r')
lines = list(filter(lambda x: x != '', file.read().split('\n')))
file.close()
file = open(userName, 'r')
lines += list(filter(lambda x: x != '', file.read().split('\n')))
file.close()
letters = { }
words = { }
for line in lines[1 : ]:
parameters = line.split('|')
word = parameters[0]
wordRepr = parameters[1]
if word[0] == '[':
freq = 0.
letters[wordRepr] = [float(parameters[-1])]
else:
freq = float(parameters[-1])
if wordRepr not in words:
words[wordRepr] = [freq, [(word, freq)]]
else:
words[wordRepr][0] += freq
words[wordRepr][1].append((word, freq))
orderedWords = OrderedDict()
for w in sorted(words.keys()):
orderedWords[w] = [words[w][0], list(map(
lambda x: x[0],
reversed(sorted(words[w][1], key = lambda x: x[1]))
))]
orderedLetters = OrderedDict()
for l in sorted(letters.keys()):
orderedLetters[l] = [letters[l][0]]
return orderedWords, orderedLetters
# return:
# - an ordered Dictionary (normalized word) => [frequency, [associated words]]
# - an ordered Dictionary (letter) => [frequency, []]
def loadDictionary(dictName, userName):
file = open(dictName, 'r')
lines = list(filter(lambda x: x != '', file.read().split('\n')))
file.close()
file = open(userName, 'r')
lines += list(filter(lambda x: x != '', file.read().split('\n')))
file.close()
letters = { }
words = { }
for line in lines[1:]:
parameters = line.split('|')
word = parameters[0]
isLetter = parameters[2] == 'LETTER'
if parameters[2] != 'PONC':
wordRepr = normalize(word)
else:
wordRepr = '.'
if not isLetter:
freq = float(parameters[-1])
else:
freq = 0
letters[word] = [float(parameters[-1]), []]
word = '[{}'.format(word)
if wordRepr not in words:
words[wordRepr] = [freq, [(word, freq)]]
else:
words[wordRepr][0] += freq
if word not in list(map(lambda x: x[0], words[wordRepr][1])):
words[wordRepr][1].append((word, freq))
orderedWords = OrderedDict()
for w in sorted(words.keys()):
orderedWords[w] = [words[w][0], list(map(
lambda x: x[0],
sorted(words[w][1], key = lambda x: x[1])
))]
orderedLetters = OrderedDict()
for l in sorted(letters.keys()):
orderedLetters[l] = [letters[l][0], []]
return orderedWords, orderedLetters
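# Minimal usage sketch (added for illustration; the dictionary values and file
# names below are hypothetical, not part of the original module). `normalize`
# strips diacritics and non-letter characters, and `computeFeed` turns a mapping
# of representation -> [frequency, ...] into a cumulative distribution.
if __name__ == '__main__':
    print(normalize('déjà-vu'))  # -> dejavu
    sample_words = {'bonjour': [0.6, ['bonjour']], 'monde': [0.4, ['monde']]}
    print(computeFeed(sample_words))  # [(0.6, 'bonjour'), (1.0, 'monde')]
    # Loading real dictionaries would look like (hypothetical paths):
    # words, letters = loadDictionary('lexique.csv', 'user_lexique.csv')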
|
StarcoderdataPython
|
3567724
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Distribution, Normal, Categorical, Independent
from torch.distributions import register_kl
class MixtureSameFamily(Distribution):
"""
A distribution made of a discrete mixture of K distributions of the same family.
Consider the example of mixing K diagonal Gaussians:
X|\phi, \mu, \sigma ~ MixtureSameFamily(\phi, Independent(Normal(\mu, \sigma^2), 1))
where
* \phi \in R^K
* \mu \in R^D
* \sigma \in R+^D
is such that
I|\phi ~ Categorical(softmax(\phi))
X|i, \mu_i, \sigma_i ~ Normal(\mu_i, \sigma_i^2)
Thus, where w = softmax(\phi),
p(x) = \sum_{i=1}^K w_i N(x|\mu_i, \sigma_i^2)
We can sample efficiently (though not yet with a reparameterisation).
And we can assess log p(x) in closed-form.
"""
def __init__(self, logits, components: Distribution):
"""
If your distribution is say a product of D independent Normal variables, make sure to
wrap it around Independent.
num_components: K
logits: [B, K]
components: [B, K, D] where batch_shape is [K] and event_shape is [D]
Note that if you have Normal(loc, scale) where the parameters are [K,D]
you need to wrap it around Independent(Normal(loc, scale), 1) to make the event_shape be [D]
otherwise it will be []
"""
if len(logits.shape) != len(components.batch_shape):
raise ValueError("The shape of logits must match the batch shape of your components")
if logits.shape[-1] != components.batch_shape[-1]:
raise ValueError("You need as many logits as you have components")
# Exclude the component dimension
batch_shape = logits.shape[:-1]
num_components = logits.shape[-1] # K
super().__init__(batch_shape, components.event_shape)
self.num_components = num_components
self.log_weights = F.log_softmax(logits, dim=-1)
self.categorical = Categorical(logits=logits)
self.components = components
def log_prob(self, x):
"""
x: [sample_shape, batch_shape, event_shape]
returns: [sample_shape, batch_shape]
"""
# Let's introduce a dimension for the components
# [sample_shape, batch_shape, 1, event_shape]
x = x.unsqueeze(-len(self.event_shape) - 1)
# [sample_shape, batch_shape, num_components]
log_joint_prob = self.components.log_prob(x) + self.log_weights
# now we marginalise the components
return torch.logsumexp(log_joint_prob, dim=-1)
def rsample(self, sample_shape=torch.Size()):
raise NotImplementedError("Not implemented yet")
def sample(self, sample_shape=torch.Size()):
"""Return a sample with shape [sample_shape, batch_shape, event_shape]"""
# [sample_shape, batch_shape, num_components, event_shape]
x = self.components.rsample(sample_shape)
# [sample_shape, batch_shape]
indicators = self.categorical.sample(sample_shape)
# [sample_shape, batch_shape, num_components]
indicators = F.one_hot(indicators, self.num_components)
if len(self.components.event_shape):
# [sample_shape, batch_shape, num_components, 1]
indicators = indicators.unsqueeze(-1)
# reduce the component dimension
return (x * indicators.type(x.dtype)).sum(len(sample_shape) + len(self.batch_shape))
class MixtureOfGaussians(MixtureSameFamily):
def __init__(self, logits, locations, scales):
"""
logits: [B, K]
locations: [B, K, D]
scales: [B, K, D]
"""
super().__init__(logits, Independent(Normal(loc=locations, scale=scales), 1))
def kl_gaussian_mog(p, q, num_samples=1):
"""
    Estimate KL(p||q) = E_p[\log p(z) - \log q(z)] = E_p[\log p(z)] - E_p[\log q(z)] = - H(p) - E_p[\log q(z)]
where we either use the closed-form entropy of p if available, or MC estimate it,
and always MC-estimate the second term.
"""
if num_samples == 1:
# [1, ...]
z = p.rsample().unsqueeze(0)
else:
# [num_samples, ...]
z = p.rsample(torch.Size([num_samples]))
try:
H_p = p.entropy()
except NotImplementedError:
H_p = - p.log_prob(z).mean(0)
return - H_p - q.log_prob(z).mean(0)
@register_kl(Independent, MixtureSameFamily)
def _kl_gaussian_mog(p, q):
return kl_gaussian_mog(p, q, 1)
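# Minimal usage sketch (added for illustration, not part of the original
# module): build a batch of two 3-component mixtures over a 4-dimensional
# event space, draw a sample and score it.
if __name__ == '__main__':
    B, K, D = 2, 3, 4
    logits = torch.zeros(B, K)               # uniform mixing weights
    locations = torch.randn(B, K, D)
    scales = torch.ones(B, K, D)
    mog = MixtureOfGaussians(logits, locations, scales)
    x = mog.sample()                          # [B, D]
    print(x.shape, mog.log_prob(x).shape)     # torch.Size([2, 4]) torch.Size([2])
    # KL between a diagonal Gaussian and the mixture (MC estimate):
    p = Independent(Normal(torch.zeros(B, D), torch.ones(B, D)), 1)
    print(kl_gaussian_mog(p, mog, num_samples=16).shape)  # torch.Size([2])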
|
StarcoderdataPython
|
8192741
|
import pytest
from numpy.testing import assert_array_almost_equal
from numpy import array
from carsons.carsons import convert_geometric_model
# `carsons` implements the model entirely in SI metric units, however this
# conversion allows us to enter in impedance as ohm-per-mile in the test
# harness, which means we can lift matrices directly out of the ieee test
# networks.
OHM_PER_MILE_TO_OHM_PER_METER = 1 / 1_609.344
OHM_PER_KILOMETER_TO_OHM_PER_METER = 1 / 1_000
class ACBN_geometry_line():
""" IEEE 13 Configuration 601 Line Geometry """
def __init__(self, ƒ=60):
self.frequency = ƒ
@property
def resistance(self):
return {
'A': 0.000115575,
'C': 0.000115575,
'B': 0.000115575,
'N': 0.000367852,
}
@property
def geometric_mean_radius(self):
return {
'A': 0.00947938,
'C': 0.00947938,
'B': 0.00947938,
'N': 0.00248107,
}
@property
def wire_positions(self):
return {
'A': (0.762, 8.5344),
'C': (2.1336, 8.5344),
'B': (0, 8.5344),
'N': (1.2192, 7.3152),
}
@property
def phases(self):
return [
'A',
'C',
'B',
'N',
]
def ACBN_line_phase_impedance_60Hz():
""" IEEE 13 Configuration 601 Impedance Solution At 60Hz """
return OHM_PER_MILE_TO_OHM_PER_METER * array([
[0.3465 + 1.0179j, 0.1560 + 0.5017j, 0.1580 + 0.4236j],
[0.1560 + 0.5017j, 0.3375 + 1.0478j, 0.1535 + 0.3849j],
[0.1580 + 0.4236j, 0.1535 + 0.3849j, 0.3414 + 1.0348j]])
def ACBN_line_phase_impedance_50Hz():
""" IEEE 13 Configuration 601 Impedance Solution At 50Hz """
return OHM_PER_KILOMETER_TO_OHM_PER_METER * array([
[0.2101 + 0.5372j, 0.09171 + 0.2691j, 0.09295 + 0.2289j],
[0.09171 + 0.2691j, 0.20460 + 0.552j, 0.09021 + 0.2085j],
[0.09295 + 0.2289j, 0.09021 + 0.2085j, 0.207 + 0.5456j]])
class CBN_geometry_line():
""" IEEE 13 Configuration 603 Line Geometry """
def __init__(self, ƒ=60):
self.frequency = ƒ
@property
def resistance(self):
return {
'B': 0.000695936,
'C': 0.000695936,
'N': 0.000695936,
}
@property
def geometric_mean_radius(self):
return {
'B': 0.00135941,
'C': 0.00135941,
'N': 0.00135941,
}
@property
def wire_positions(self):
return {
'B': (2.1336, 8.5344),
'C': (0, 8.5344),
'N': (1.2192, 7.3152),
}
@property
def phases(self):
return [
'B',
'C',
'N',
]
def CBN_line_phase_impedance_60Hz():
""" IEEE 13 Configuration 603 Impedance Solution At 60Hz """
return OHM_PER_MILE_TO_OHM_PER_METER * array([
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 1.3294 + 1.3471j, 0.2066 + 0.4591j],
[0.0000 + 0.0000j, 0.2066 + 0.4591j, 1.3238 + 1.3569j]])
def CBN_line_phase_impedance_50Hz():
""" IEEE 13 Configuration 603 Impedance Solution At 50Hz """
return OHM_PER_KILOMETER_TO_OHM_PER_METER * array([
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.8128 + 0.7144j, 0.1153 + 0.2543j],
[0.0000 + 0.0000j, 0.1153 + 0.2543j, 0.8097 + 0.7189j]])
class CN_geometry_line():
""" IEEE 13 Configuration 605 Line Geometry"""
def __init__(self, ƒ=60):
self.frequency = ƒ
@property
def resistance(self):
return {
'C': 0.000695936,
'N': 0.000695936,
}
@property
def geometric_mean_radius(self):
return {
'C': 0.00135941,
'N': 0.00135941,
}
@property
def wire_positions(self):
return {
'C': (0, 8.8392),
'N': (0.1524, 7.3152),
}
@property
def phases(self):
return [
'C',
'N',
]
def CN_line_phase_impedance_60Hz():
""" IEEE 13 Configuration 605 Impedance Solution At 60Hz """
return OHM_PER_MILE_TO_OHM_PER_METER * array([
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 1.3292 + 1.3475j]])
def CN_line_phase_impedance_50Hz():
""" IEEE 13 Configuration 605 Impedance Solution At 50Hz """
return OHM_PER_KILOMETER_TO_OHM_PER_METER * array([
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.8127 + 0.7146j]])
@pytest.mark.parametrize(
"line,frequency,expected_impedance",
[(ACBN_geometry_line, 60, ACBN_line_phase_impedance_60Hz()),
(CBN_geometry_line, 60, CBN_line_phase_impedance_60Hz()),
(CN_geometry_line, 60, CN_line_phase_impedance_60Hz()),
(ACBN_geometry_line, 50, ACBN_line_phase_impedance_50Hz()),
(CBN_geometry_line, 50, CBN_line_phase_impedance_50Hz()),
(CN_geometry_line, 50, CN_line_phase_impedance_50Hz())])
def test_converts_geometry_to_phase_impedance(
line, frequency, expected_impedance):
actual_impedance = convert_geometric_model(line(ƒ=frequency))
assert_array_almost_equal(expected_impedance,
actual_impedance)
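# Illustrative usage outside of pytest (added sketch, not part of the original
# test suite): compute the phase impedance matrix for the IEEE 13 configuration
# 601 geometry directly, then convert it back to ohms per mile.
if __name__ == '__main__':
    z_per_meter = convert_geometric_model(ACBN_geometry_line(ƒ=60))
    z_per_mile = z_per_meter / OHM_PER_MILE_TO_OHM_PER_METER
    print(z_per_mile)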
|
StarcoderdataPython
|
4916621
|
<reponame>YeoLab/gscripts<filename>gscripts/rnaseq/helpers.py
__author__ = 'gpratt'
import pandas as pd
import pyBigWig
import pybedtools
import scipy
def counts_to_rpkm(featureCountsTable):
"""
Given a dataframe or a text file from featureCounts and converts that thing into a dataframe of RPKMs
"""
if isinstance(featureCountsTable, str):
featureCountsTable = pd.read_table(featureCountsTable, skiprows=1, index_col=0)
counts = featureCountsTable.ix[:, 5:]
lengths = featureCountsTable['Length']
mapped_reads = counts.sum()
return (counts * pow(10, 9)).div(mapped_reads, axis=1).div(lengths, axis=0)
class ReadDensity():
def __init__(self, pos, neg):
self.pos = pyBigWig.open(pos)
self.neg = pyBigWig.open(neg)
def values(self, chrom, start, end, strand):
if strand == "+":
return self.pos.values(chrom, start, end)
elif strand == "-":
return list(reversed(self.neg.values(chrom, start, end)))
else:
raise("Strand neither + or -")
def miso_to_bed(miso_list):
"""
:param miso_list: list miso location
:return: bedtool of exons from miso locations
"""
result = []
for exon in miso_list:
chrom, start, stop, strand = exon.split(":")
result.append(pybedtools.create_interval_from_list([chrom, start, stop, "0", "0", strand]))
return pybedtools.BedTool(result)
def fisher_exact_on_genes(regulated, bound, all_genes):
"""
:param regulated: set of regulated genes
:param bound: set of bound genes
    :param all_genes: all genes to analyze, generally all genes or all protein-coding genes
:return:
"""
regulated = regulated & all_genes
bound = bound & all_genes
not_regulated = all_genes - set(regulated)
not_bound = all_genes - set(bound)
bound_and_regulated = len(regulated & bound)
bound_and_not_regulated = len(bound & not_regulated)
not_bound_and_regulated = len(not_bound & regulated)
not_bound_and_not_regulated = len(not_bound & not_regulated)
counts = pd.Series({"bound_and_regulated": bound_and_regulated,
"bound_and_not_regulated": bound_and_not_regulated,
"not_bound_and_regulated": not_bound_and_regulated,
"not_bound_and_not_regulated": not_bound_and_not_regulated,
})
test = scipy.stats.fisher_exact([[counts['bound_and_regulated'], counts['not_bound_and_regulated']],
[counts['bound_and_not_regulated'], counts['not_bound_and_not_regulated']]])
counts['p_value'] = test[1]
return counts
def fisher_exact_df(expression_df, motif_df, all_genes):
"""
:param expression_df: dataframe with two indexes, thing to group by and genes
:param motif_df: dataframe of bound genes with two index levels, level 1 is groups (cds, 3' utr) and level 2 is genes, assumes that binding is counted per gene that has a region
    This gets around the issue of increasing unbound regions when the gene doesn't actually have a region,
    i.e. intronless genes being counted as unbound instead of discarded for the purpose of the analysis
:return:
"""
result = {}
for regulated_name, df in expression_df.groupby(level=0):
for bound_name in motif_df.index.levels[0]:
regulated = set(df.ix[regulated_name].index)
#This is for filtering events that may not exist in the bound list out to keep stats good
regulated = set(motif_df.ix[bound_name].index) & regulated
all_genes_in_region = all_genes & set(motif_df.ix[bound_name].index)
bound = set(motif_df[motif_df['count'] > 0].ix[bound_name].index)
counts = fisher_exact_on_genes(regulated, bound, all_genes_in_region)
result[(regulated_name, bound_name)] = counts
result_df = pd.DataFrame(result).T
result_df['fraction_bound_and_regulated'] = result_df.bound_and_regulated / (result_df.bound_and_regulated + result_df.not_bound_and_regulated)
result_df['fraction_bound_and_not_regulated'] = result_df.bound_and_not_regulated / (result_df.bound_and_not_regulated + result_df.not_bound_and_not_regulated)
result_df.p_value = result_df.p_value * len(result_df.p_value)
result_df = result_df.drop("uncatagorized", level=1)
result_df['percent_bound_and_regulated'] = result_df['fraction_bound_and_regulated'] * 100
result_df['percent_bound_and_not_regulated'] = result_df['fraction_bound_and_not_regulated'] * 100
return result_df
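# Minimal usage sketch (added for illustration; the file path below is
# hypothetical, and counts_to_rpkm relies on the legacy DataFrame.ix indexer,
# so it assumes an older pandas release that still provides it):
# rpkm = counts_to_rpkm("featureCounts_output.txt")
# rpkm.head()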
|
StarcoderdataPython
|
4940003
|
<reponame>RiverArchitect/program
# !/usr/bin/python
try:
import sys, os, arcpy, logging, random
from arcpy.sa import *
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\.site_packages\\riverpy\\")
import config
import cReachManager as cRM
import cDefinitions as cDef
import fGlobal as fGl
except:
print("ExceptionERROR: Missing fundamental packages (required: arcpy, os, sys, logging, random).")
class VolumeAssessment:
def __init__(self, unit_system, org_ras_dir, mod_ras_dir, reach_ids):
# unit_system must be either "us" or "si"
# feature_ids = list of feature shortnames
# reach_ids = list of reach names to limit the analysis
# general directories and parameters
self.cache = config.dir2va + ".cache%s\\" % str(random.randint(1000000, 9999999))
self.vol_name = mod_ras_dir.split(":\\")[-1].split(":/")[-1].split("01_Conditions\\")[-1].split("01_Conditions/")[-1].split(".tif")[0].replace("\\", "_").replace("/", "_").replace("_dem", "")
fGl.chk_dir(self.cache)
fGl.clean_dir(self.cache)
self.logger = logging.getLogger("logfile")
self.output_ras_dir = config.dir2va + "Output\\%s\\" % self.vol_name
fGl.chk_dir(self.output_ras_dir)
fGl.clean_dir(self.output_ras_dir)
self.rasters = []
self.raster_info = ""
self.rasters_for_pos_vol = {}
self.rasters_for_neg_vol = {}
self.reader = cRM.Read()
self.reaches = cDef.ReachDefinitions()
self.volume_neg_dict = {}
self.volume_pos_dict = {}
try:
self.orig_raster = arcpy.Raster(org_ras_dir)
except:
self.orig_raster = Float(-1)
self.logger.info("ERROR: Cannot load original DEM")
try:
self.modified_raster = arcpy.Raster(mod_ras_dir)
except:
self.modified_raster = Float(-1)
self.logger.info("ERROR: Cannot load modified DEM.")
# set relevant reaches
try:
self.reach_ids_applied = reach_ids
self.reach_names_applied = []
for rn in self.reach_ids_applied:
self.reach_names_applied.append(self.reaches.dict_id_names[rn])
except:
self.reach_ids_applied = self.reaches.id_xlsx
self.reach_names_applied = self.reaches.name_dict
self.logger.info("WARNING: Cannot identify reaches.")
# set unit system variables
if ("us" in str(unit_system)) or ("si" in str(unit_system)):
self.units = unit_system
else:
self.units = "us"
self.logger.info("WARNING: Invalid unit_system identifier. unit_system must be either \'us\' or \'si\'.")
self.logger.info(" Setting unit_system default to \'us\'.")
if self.units == "us":
self.convert_volume_to_cy = 0.037037037037037037037037037037037 #ft3 -> cy: float((1/3)**3)
self.unit_info = " cubic yard"
self.volume_threshold = 0.99 # ft -- CHANGE lod US customary HERE --
else:
self.convert_volume_to_cy = 1.0 # m3
self.unit_info = " cubic meter"
self.volume_threshold = 0.30 # m -- CHANGE lod SI metric HERE --
def make_volume_diff_rasters(self):
# Writes Raster Dataset to Output/Rasters/vol_name folder
self.logger.info("")
self.logger.info(" * creating volume difference Rasters ...")
for rn in self.reach_ids_applied:
if not (rn == "none"):
reach_name = str(rn)
else:
reach_name = "ras" + str(rn)[0]
arcpy.gp.overwriteOutput = True
arcpy.env.workspace = self.cache
try:
extents = self.reader.get_reach_coordinates(self.reaches.dict_id_int_id[rn])
except:
extents = "MAXOF"
self.logger.info("ERROR: Could not retrieve reach coordinates.")
if not (type(extents) == str):
try:
# XMin, YMin, XMax, YMax
arcpy.env.extent = arcpy.Extent(extents[0], extents[1], extents[2], extents[3])
except:
self.logger.info("ERROR: Failed to set reach extents -- output is corrupted.")
continue
else:
arcpy.env.extent = extents
if str(self.vol_name).__len__() > 5:
ras_name = reach_name + "_" + str(self.vol_name)[0:5]
else:
ras_name = reach_name + "_" + str(self.vol_name)
self.logger.info(" * making excavation Raster ... ")
try:
excav_ras = Con(Float(self.modified_raster) <= Float(self.orig_raster),
Con(Float(Abs(self.orig_raster - self.modified_raster)) >= self.volume_threshold,
Float(Abs(self.orig_raster - self.modified_raster)), Float(0.0)),
Float(0.0))
except arcpy.ExecuteError:
self.logger.info(arcpy.GetMessages(2))
arcpy.AddError(arcpy.GetMessages(2))
except Exception as e:
self.logger.info(e.args[0])
arcpy.AddError(e.args[0])
except:
self.logger.info("ERROR: (arcpy).")
self.logger.info(arcpy.GetMessages())
try:
self.rasters_for_neg_vol.update({rn: excav_ras})
self.volume_neg_dict.update({rn: -0.0})
self.rasters.append(ras_name + "exc.tif")
excav_ras.save(self.output_ras_dir + ras_name + "exc.tif")
except:
self.logger.info("ERROR: Raster could not be saved.")
self.logger.info(" * making fill Raster ... ")
try:
fill_ras = Con(Float(self.modified_raster) > Float(self.orig_raster),
Con(Float(Abs(self.modified_raster - self.orig_raster)) >= self.volume_threshold,
Float(Abs(self.modified_raster - self.orig_raster)), Float(0.0)),
Float(0.0))
except arcpy.ExecuteError:
self.logger.info(arcpy.GetMessages(2))
arcpy.AddError(arcpy.GetMessages(2))
except Exception as e:
self.logger.info(e.args[0])
arcpy.AddError(e.args[0])
except:
self.logger.info("ERROR: (arcpy).")
self.logger.info(arcpy.GetMessages())
try:
self.rasters_for_pos_vol.update({rn: fill_ras})
self.volume_pos_dict.update({rn: +0.0})
self.rasters.append(ras_name + "fill.tif")
fill_ras.save(self.output_ras_dir + ras_name + "fill.tif")
except:
self.logger.info("ERROR: Raster could not be saved.")
def volume_computation(self):
self.logger.info(" * calculating volume differences ...")
# requires 3D extension
arcpy.CheckOutExtension("3D")
arcpy.env.extent = "MAXOF"
for rn in self.reach_ids_applied:
try:
self.logger.info(" * calculating fill volume from " + str(self.rasters_for_pos_vol[rn]))
self.logger.info(" *** takes time ***")
feat_vol = arcpy.SurfaceVolume_3d(self.rasters_for_pos_vol[rn], "", "ABOVE", 0.0, 1.0)
voltxt = feat_vol.getMessage(1).split("Volume=")[1]
self.logger.info(" RESULT: " + str(float(voltxt)*self.convert_volume_to_cy) + self.unit_info + ".")
self.volume_pos_dict[rn] = float(voltxt) * self.convert_volume_to_cy
except:
self.logger.info("ERROR: Calculation of volume from " + str(self.rasters_for_pos_vol[rn]) + " failed.")
try:
self.logger.info(" * calculating excavation volume from " + str(self.rasters_for_neg_vol[rn]))
self.logger.info(" *** takes time ***")
feat_vol = arcpy.SurfaceVolume_3d(self.rasters_for_neg_vol[rn], "", "ABOVE", 0.0, 1.0)
voltxt = feat_vol.getMessage(1).split("Volume=")[1]
self.logger.info(" RESULT: " + str(float(voltxt)*self.convert_volume_to_cy) + self.unit_info + ".")
self.volume_neg_dict[rn] = float(voltxt) * self.convert_volume_to_cy
except:
self.logger.info("ERROR: Calculation of volume from " + str(self.rasters_for_neg_vol[rn]) + " failed.")
# ALTERNATIVE OPTION IF arcpy.SurfaceVolume_3d FAILS
# import numpy
# myArray = arcpy.RasterToNumPyArray(outVol)
# totVolume = numpy.sum(myArray)
arcpy.CheckInExtension("3D")
def get_volumes(self):
self.make_volume_diff_rasters()
self.volume_computation()
# write excavation volumes to workbook
writer = cRM.Write(self.output_ras_dir)
writer.write_volumes(self.vol_name, self.reach_names_applied,
fGl.dict_values2list(self.volume_neg_dict.values()), self.unit_info.strip(), -1)
del writer
# write fill volumes to workbook
writer = cRM.Write(self.output_ras_dir)
writer.write_volumes(self.vol_name, self.reach_names_applied,
fGl.dict_values2list(self.volume_pos_dict.values()), self.unit_info.strip(), 1)
self.logger.info("FINISHED.")
# copy logfile (contains volume information)
try:
from shutil import copyfile
copyfile(config.dir2ra + "logfile.log", config.dir2va + "Output\\Logfiles\\logfile.log")
except:
pass
return self.vol_name, self.output_ras_dir
def __call__(self):
print("Class Info: <type> = VolumeAssessment (%s)" % os.path.dirname(__file__))
print(dir(self))
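# Illustrative call sequence (added sketch, not executable without an ArcGIS
# licence and a RiverArchitect workspace; paths and reach ids are hypothetical
# examples):
# va = VolumeAssessment("us", "D:\\conditions\\orig_dem.tif",
#                       "D:\\conditions\\mod_dem.tif", ["raa", "rab"])
# vol_name, out_dir = va.get_volumes()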
|
StarcoderdataPython
|
11336783
|
import urllib.request, sys,base64,json,os,time,string,re
from PIL import Image
from aip import AipOcr
from aitext import Ai
start = time.time()
os.system("adb shell /system/bin/screencap -p /sdcard/screenshot.png")
os.system("adb pull /sdcard/screenshot.png ./screenshot.png")
'''
Hanvon OCR: the price has gone up again...
host = 'http://text.aliapi.hanvon.com'
path = '/rt/ws/v1/ocr/text/recg'
method = 'POST'
appcode = 'a962e94260ee4043b824d2f40c126d8e' # Hanvon OCR appcode (fill in your own)
querys = 'code=74e51a88-41ec-413e-b162-bd031fe0407e'
bodys = {}
url = host + path + '?' + querys
'''
""" (百度ocr)你的 APPID AK SK """
APP_ID = '10670003'
API_KEY = '<KEY>'
SECRET_KEY = '<KEY>'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
im = Image.open(r"./screenshot.png")
img_size = im.size
w = im.size[0]
h = im.size[1]
print("xx:{}".format(img_size))
region = im.crop((70,200, w-70,1200)) #裁剪的区域
region.save(r"./crop_test1.png")
""" 读取图片 """
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
image = get_file_content(r"./crop_test1.png")
respon = client.basicGeneral(image)  # after the 500 free basicGeneral calls run out, switch to respon = client.basicAccurate(image), which allows 50 more calls
titles = respon['words_result']  # extract the recognized question lines
issue = ''
answer = ['','','','','','']
countone = 0
answercount = 0
for title in titles:
countone+=1
if(countone >=len(titles)-2):
answer[answercount] = title['words']
answercount+=1
else:
issue = issue +title['words']
tissue = issue[1:2]
if str.isdigit(tissue):  # strip the question number prefix
issue = issue[3:]
else:
issue = issue[2:]
print(issue)  # print the question
print(' A:'+answer[0]+' B:'+answer[1]+' C:'+answer[2])  # print the answer options
keyword = issue  # recognized question text
ai=Ai(issue,answer)
ai.search()
'''
convey = 'n'
if convey == 'y' or convey == 'Y':
results = baiduSearch.search(keyword, convey=True)
elif convey == 'n' or convey == 'N' or not convey:
results = baiduSearch.search(keyword)
else:
    print('invalid input')
exit(0)
count = 0
for result in results:
    #print('{0} {1} {2} {3} {4}'.format(result.index, result.title, result.abstract, result.show_url, result.url)) # formatted output goes here
    print('{0}'.format(result.abstract)) # formatted output goes here
count=count+1
    if(count == 2): # only 2 results are shown here; adjust as needed
break
'''
end = time.time()
print('Elapsed time: ' + str(end-start) + ' seconds')
|
StarcoderdataPython
|
6552335
|
<filename>src/submanager/utils/output.py
"""Utility functions and classes for handling and printing output."""
# Future imports
from __future__ import (
annotations,
)
def format_error(error: BaseException) -> str:
"""Format an error as a human-readible string."""
return f"{type(error).__name__}: {error}"
def print_error(error: BaseException) -> None:
"""Print the error in a human-readible format for end users."""
print(format_error(error))
class VerbosePrinter:
"""Simple wrapper that only prints if verbose is set."""
def __init__(self, enable: bool = True) -> None:
self.enable = enable
def __call__(self, *text: str) -> None:
"""If verbose is set, print the text."""
if self.enable:
print(*text)
class FancyPrinter(VerbosePrinter):
"""Simple print wrapper with a few extra features."""
def __init__(
self,
enable: bool = True,
*,
char: str = "#",
step: int = 6,
level: int | None = None,
sep: str = " ",
before: str = "",
after: str = "",
) -> None:
super().__init__(enable=enable)
self.char = char
self.step = step
self.level = level
self.sep = sep
self.before = before
self.after = after
def wrap_text(self, *text: str, level: int | None) -> str:
"""Wrap the text in the configured char, up to the specified level."""
text_joined = self.sep.join(text)
if level and level > 0:
wrapping = self.char * (level * self.step)
text_joined = f"{wrapping} {text_joined} {wrapping}"
text_joined = f"{self.before}{text_joined}{self.after}"
return text_joined
def __call__(self, *text: str, level: int | None = None) -> None:
"""Wrap the text at a certain level given the defaults."""
print(self.wrap_text(*text, level=level))
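# Minimal usage sketch (added for illustration, not part of the original
# module): the printers are plain callables, so they can be dropped in where a
# bare print would otherwise be used.
if __name__ == "__main__":
    vprint = VerbosePrinter(enable=True)
    vprint("only printed when verbose is enabled")
    fancy = FancyPrinter(char="#", step=3)
    fancy("Generating thread", level=2)   # ###### Generating thread ######
    try:
        raise ValueError("example failure")
    except ValueError as error:
        print_error(error)                # ValueError: example failure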
|
StarcoderdataPython
|
6494812
|
import theano.tensor as T
from .layer import Layer
from ..utils.utils_functions import ActivationFunctions
from ..utils.utils_translation import TextTranslation
__all__ = ['Convolution1D', 'Convolution2D']
class ConvolutionBase(Layer):
""" Convolution Layer Class Base-
Parameters
----------
num_filters : int
Number of filters.
filter_size : int or tuple[]
The tuple has the filter size.
input_shape : tuple[]
The tuple has the batch size, num input feature maps and input data size.
stride : int
pad : int
untie_biases : bool
filter_flip : bool
non_linearity : callable
"""
# noinspection PyTypeChecker
def __init__(self, num_filters, filter_size, input_shape=None, stride=1, pad=0, untie_biases=False,
filter_flip=True, non_linearity=ActivationFunctions.linear):
self._num_filters = num_filters
self._filter_size = filter_size
self._stride = stride
self._pad = pad
self._untie_biases = untie_biases
self._filter_flip = filter_flip
if input_shape is None:
self._batch_size = None
self._num_feature_maps = None
output_shape = None
else:
self._batch_size = input_shape[0]
self._num_feature_maps = input_shape[1]
output_shape = ((self._batch_size, self._num_filters) +
tuple(self._get_size_output(_input, _filter, s, p)
for _input, _filter, s, p
in zip(input_shape[2:], self._filter_size,
self._stride, pad)))
super(ConvolutionBase, self).__init__(input_shape=input_shape,
output_shape=output_shape,
non_linearity=non_linearity)
# noinspection PyTypeChecker
def set_input_shape(self, shape):
""" Set input shape.
Parameters
----------
shape : tuple[]
Shape of input.
"""
self._input_shape = shape
self._output_shape = list(shape)
self._batch_size = self._input_shape[0]
self._num_feature_maps = self._input_shape[1]
output_shape = ((self._batch_size, self._num_filters) +
tuple(self._get_size_output(_input, _filter, s, p)
for _input, _filter, s, p
in zip(self._input_shape[2:], self._filter_size,
self._stride, self._pad)))
self._output_shape = tuple(output_shape)
@staticmethod
def _get_size_output(input_size, filter_size, stride, pad):
""" Gets size output.
Parameters
----------
input_size : int
Size of input layer.
filter_size : int
Size of filter (used in convolution).
stride : int
pad : int
Returns
-------
int
Returns size of output.
"""
# extract from Lasagne Library
if input_size is None:
return None
if pad == 'valid':
output_size = input_size - filter_size + 1
elif pad == 'full':
output_size = input_size + filter_size - 1
elif pad == 'same':
output_size = input_size
elif isinstance(pad, int):
output_size = input_size + 2 * pad - filter_size + 1
else:
raise ValueError(TextTranslation().get_str('Invalid_pad') + ' : %s' % pad)
output_size = (output_size + stride - 1) // stride
return output_size
def get_dim_conv(self):
""" Gets dimension convolution.
Returns
-------
int
Returns dimension of convolution.
"""
return len(self.get_input_shape()) - 2
def get_shape_W(self):
""" Gets shape weights of layer.
"""
return (self._num_filters, self._num_feature_maps) + self._filter_size
def get_shape_b(self):
""" Gets shape bias of layer.
Returns
-------
int
Returns number of filters.
"""
return self._num_filters,
# noinspection PyTypeChecker
def output(self, x, prob=True):
""" Return output of layers
Parameters
----------
x : theano.tensor.matrix
Input sample
prob : bool
No used.
Returns
-------
theano.tensor.matrix
Returns the output layers.
"""
convolution = self.convolution(x)
if self.get_b() is None:
activation = convolution
elif self._untie_biases:
activation = convolution + T.shape_padleft(self.get_b(), 1)
else:
activation = convolution + self.get_b().dimshuffle(('x', 0) + ('x',) * self.get_dim_conv())
return self._non_linearity(activation)
def convolution(self, x):
""" Compute the convolution.
"""
raise NotImplementedError
def conv1d_mc0(_input, filters, image_shape=None, filter_shape=None,
border_mode='valid', subsample=(1,), filter_flip=True):
""" Generate convolution 1D using conv2d with width == 1.
Parameters
----------
_input : theano.tensor.shared
Input layer.
filters : theano.tensor.shared
Filters.
image_shape : tuple[]
Shape of image or array with 1D signals.
filter_shape : tuple[]
Shape of filters.
border_mode : tuple[] or int
Border mode.
subsample
Subsample.
filter_flip
Filter flip.
Returns
-------
theano.tensor
Returns convolution 1D.
"""
# extract from Lasagne Library
if image_shape is None:
image_shape_mc0 = None
else:
# (b, c, i0) to (b, c, 1, i0)
image_shape_mc0 = (image_shape[0], image_shape[1], 1, image_shape[2])
if filter_shape is None:
filter_shape_mc0 = None
else:
filter_shape_mc0 = (filter_shape[0], filter_shape[1], 1,
filter_shape[2])
if isinstance(border_mode, tuple):
(border_mode,) = border_mode
if isinstance(border_mode, int):
border_mode = (0, border_mode)
input_mc0 = _input.dimshuffle(0, 1, 'x', 2)
filters_mc0 = filters.dimshuffle(0, 1, 'x', 2)
conved = T.nnet.conv2d(
input_mc0, filters_mc0, image_shape_mc0, filter_shape_mc0,
subsample=(1, subsample[0]), border_mode=border_mode,
filter_flip=filter_flip)
return conved[:, :, 0, :] # drop the unused dimension
class Convolution1D(ConvolutionBase):
""" Convolution 1D Layer.
"""
def __init__(self, num_filters, filter_size, input_shape=None, stride=1, pad=0, untie_biases=False,
filter_flip=True, non_linearity=ActivationFunctions.linear):
super(Convolution1D, self).__init__(num_filters=num_filters, filter_size=filter_size, input_shape=input_shape,
stride=stride, pad=pad, untie_biases=untie_biases,
filter_flip=filter_flip, non_linearity=non_linearity)
def convolution(self, _input):
""" Convolution 1D Function.
Parameters
----------
_input : theano.tensor
Input sample.
Returns
-------
theano.tensor
Returns convolution 1D.
"""
border_mode = 'half' if self._pad == 'same' else self._pad
return conv1d_mc0(_input, self.get_W(),
self._input_shape, self.get_shape_W(),
subsample=self._stride,
border_mode=border_mode,
# filter_flip=self._filter_flip
)
class Convolution2D(ConvolutionBase):
""" Convolution 2D Layer.
"""
def __init__(self, num_filters, filter_size, input_shape=None, stride=(1, 1), pad=(0, 0),
untie_biases=False, filter_flip=True, non_linearity=ActivationFunctions.linear):
super(Convolution2D, self).__init__(num_filters=num_filters, filter_size=filter_size, input_shape=input_shape,
stride=stride, pad=pad, untie_biases=untie_biases,
filter_flip=filter_flip, non_linearity=non_linearity)
def convolution(self, _input, _conv=T.nnet.conv2d):
""" Convolution 2D Function.
Parameters
----------
_input : theano.tensor
Input sample.
_conv : theano.Op
Convolution function.
Returns
-------
theano.tensor
Returns convolution 1D.
"""
border_mode = 'half' if self._pad == 'same' else self._pad
return _conv(_input, self.get_W(),
self._input_shape, self.get_shape_W(),
subsample=self._stride,
border_mode=border_mode,
filter_flip=self._filter_flip
)
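# Shape bookkeeping sketch (added for illustration; it only exercises the
# helpers defined above and assumes the base Layer class accepts the
# constructor arguments forwarded by ConvolutionBase).
if __name__ == '__main__':
    conv = Convolution2D(num_filters=16, filter_size=(5, 5),
                         input_shape=(32, 3, 28, 28), stride=(1, 1), pad=(0, 0))
    print(conv.get_shape_W())   # (16, 3, 5, 5)
    print(conv.get_shape_b())   # (16,)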
|
StarcoderdataPython
|
3511421
|
<gh_stars>0
# Generated by Django 3.1.1 on 2020-09-20 22:45
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('equipment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='adventuringgear',
name='authorized_editors',
field=models.ManyToManyField(blank=True, related_name='_adventuringgear_authorized_editors_+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='armor',
name='authorized_editors',
field=models.ManyToManyField(blank=True, related_name='_armor_authorized_editors_+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='tool',
name='authorized_editors',
field=models.ManyToManyField(blank=True, related_name='_tool_authorized_editors_+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='weapon',
name='authorized_editors',
field=models.ManyToManyField(blank=True, related_name='_weapon_authorized_editors_+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='wondrousitem',
name='authorized_editors',
field=models.ManyToManyField(blank=True, related_name='_wondrousitem_authorized_editors_+', to=settings.AUTH_USER_MODEL),
),
]
|
StarcoderdataPython
|
6517138
|
#! /usr/bin/env python
import sys, time
import Pyro.naming, Pyro.core
from Pyro.protocol import getHostname
# initialize the client and set the default namespace group
Pyro.core.initClient()
# locate the NS
locator = Pyro.naming.NameServerLocator()
print 'Searching Naming Service...',
ns = locator.getNS()
print 'Naming Service found at',ns.URI.address,'('+(Pyro.protocol.getHostname(ns.URI.address) or '??')+') port',ns.URI.port
URI=ns.resolve(':test.autoreconnect')
obj = Pyro.core.getAttrProxyForURI(URI)
while 1:
print 'call...'
try:
obj.method(42)
print 'Sleeping 1 second'
time.sleep(1)
#obj._release() # experiment with this
#print 'released'
#time.sleep(2)
except Pyro.errors.ConnectionClosedError,x: # or possibly even ProtocolError
print 'Connection lost. REBINDING...'
print '(restart the server now)'
obj.adapter.rebindURI(tries=10)
|
StarcoderdataPython
|
6541769
|
<reponame>kreczko/l1t-cli
"""
dqm gui setup:
Sets up the DQM GUI. It will be available at localhost:8060/dqm/dev
From https://twiki.cern.ch/twiki/bin/view/CMS/DQMGuiForUsers
Usage:
dqm gui setup
"""
import logging
import os
import string
import hepshell
from hepshell.interpreter import time_function
from l1t_cli.setup import WORKSPACE
from l1t_cli.setup import INTEGRATION_TAG
from l1t_cli.common import is_vagrant_host
LOG = logging.getLogger(__name__)
DQM_GIT = 'https://github.com/dmwm/deployment.git'
DQM_PATH = os.path.join(WORKSPACE, 'dqm')
DQM_GUI_PATH = os.path.join(DQM_PATH, 'gui')
if is_vagrant_host():
# the vagrant box is a bit special since the working directory
# is a shared directory with the host machine
# this causes problems with rpm DB creation
# since we do not want to use NFS mounts instead (requires sudo on host)
# lets change the path to something on the machine
DQM_GUI_PATH = DQM_GUI_PATH.replace(WORKSPACE, '/opt')
# latest tag from https://github.com/dmwm/deployment/releases
DQM_TAG = 'HG1610a'
SCRAM_ARCH = 'slc6_amd64_gcc493'
class Command(hepshell.Command):
def __init__(self, path=__file__, doc=__doc__):
super(Command, self).__init__(path, doc)
@time_function('dqm gui setup', LOG)
def run(self, args, variables):
self.__prepare(args, variables)
if not self.__can_run():
LOG.error('DQM install path already exists ({0})'.format(DQM_GUI_PATH))
return False
os.makedirs(DQM_GUI_PATH)
from hepshell.interpreter import call
git_command = 'git clone {DQM_GIT} {DQM_GUI_PATH}/deployment'.format(
DQM_GIT=DQM_GIT, DQM_GUI_PATH=DQM_GUI_PATH
)
code, _, err = call(git_command, logger=LOG, shell=True)
if not code == 0:
self.__text = 'Something went wrong:\n'
self.__text += err
deploy_command = ' '.join([
'{DQM_GUI_PATH}/deployment/Deploy',
'-A {SCRAM_ARCH}',
'-r "comp=comp"',
'-R comp@{DQM_TAG}',
'-t MYDEV',
'-s "prep sw post"',
DQM_GUI_PATH,
'dqmgui/bare',
]
)
deploy_command = deploy_command.format(
DQM_GUI_PATH=DQM_GUI_PATH, SCRAM_ARCH=SCRAM_ARCH, DQM_TAG=DQM_TAG
)
code, _, err = call(deploy_command, logger=LOG, shell=True)
if not code == 0:
self.__text = 'Something went wrong:\n'
self.__text += err
self.__text = 'You can now run "l1t run dqm gui"'
return True
def __can_run(self):
return not os.path.exists(DQM_GUI_PATH)
|
StarcoderdataPython
|
11228443
|
getAllObjects = [{
'accountId': 123456,
'createDate': '2020-09-15T13:12:08-06:00',
'id': 112356450,
'modifyDate': '2020-09-15T13:13:13-06:00',
'status': 'COMPLETED',
'userRecordId': 987456321,
'userRecord': {
'username': '<EMAIL>'
},
'items': [
{
'categoryCode': 'port_speed',
'description': '100 Mbps Private Network Uplink'
},
{
'categoryCode': 'service_port',
'description': '100 Mbps Private Uplink'
},
{
'categoryCode': 'public_port',
'description': '0 Mbps Public Uplink'
}
],
'orderApprovalDate': '2020-09-15T13:13:13-06:00',
'orderTotalAmount': '0'
},
{
'accountId': 123456,
'createDate': '2019-09-15T13:12:08-06:00',
'id': 645698550,
'modifyDate': '2019-09-15T13:13:13-06:00',
'status': 'COMPLETED',
'userRecordId': 987456321,
'userRecord': {
'username': '<EMAIL>'
},
'items': [
{
'categoryCode': 'port_speed',
'description': '100 Mbps Private Network Uplink'
},
],
'orderApprovalDate': '2019-09-15T13:13:13-06:00',
'orderTotalAmount': '0'
}]
getObject = {
'accountId': 1234,
'createDate': '2020-09-23T16:22:30-06:00',
'id': 6543210,
'impersonatingUserRecordId': None,
'initialInvoice': {
'amount': '0',
'id': 60012345,
'invoiceTotalAmount': '0'
},
'items': [{
'description': 'Dual Intel Xeon Silver 4210 (20 Cores, 2.20 GHz)'
}],
'modifyDate': '2020-09-23T16:22:32-06:00',
'orderQuoteId': None,
'orderTypeId': 11,
'presaleEventId': None,
'privateCloudOrderFlag': False,
'status': 'APPROVED',
'userRecord': {
'displayName': 'testUser'
},
'userRecordId': 7654321,
}
|
StarcoderdataPython
|
3231585
|
import traceback
from flask import current_app
from urllib.parse import urljoin
from ..lib import utils
from .base import db
from .setting import Setting
from .user import User
from .account_user import AccountUser
class Account(db.Model):
__tablename__ = 'account'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(40), index=True, unique=True, nullable=False)
description = db.Column(db.String(128))
contact = db.Column(db.String(128))
mail = db.Column(db.String(128))
domains = db.relationship("Domain", back_populates="account")
apikeys = db.relationship("ApiKey",
secondary="apikey_account",
back_populates="accounts")
def __init__(self, name=None, description=None, contact=None, mail=None):
self.name = name
self.description = description
self.contact = contact
self.mail = mail
# PDNS configs
self.PDNS_STATS_URL = Setting().get('pdns_api_url')
self.PDNS_API_KEY = Setting().get('pdns_api_key')
self.PDNS_VERSION = Setting().get('pdns_version')
self.API_EXTENDED_URL = utils.pdns_api_extended_uri(self.PDNS_VERSION)
if self.name is not None:
self.name = ''.join(c for c in self.name.lower()
if c in "abcdefghijklmnopqrstuvwxyz0123456789")
def __repr__(self):
        return '<Account {0!r}>'.format(self.name)
def get_name_by_id(self, account_id):
"""
Convert account_id to account_name
"""
account = Account.query.filter(Account.id == account_id).first()
if account is None:
return ''
return account.name
def get_id_by_name(self, account_name):
"""
Convert account_name to account_id
"""
# Skip actual database lookup for empty queries
if account_name is None or account_name == "":
return None
account = Account.query.filter(Account.name == account_name).first()
if account is None:
return None
return account.id
def create_account(self):
"""
Create a new account
"""
# Sanity check - account name
if self.name == "":
return {'status': False, 'msg': 'No account name specified'}
# check that account name is not already used
account = Account.query.filter(Account.name == self.name).first()
if account:
return {'status': False, 'msg': 'Account already exists'}
db.session.add(self)
db.session.commit()
return {'status': True, 'msg': 'Account created successfully'}
def update_account(self):
"""
Update an existing account
"""
# Sanity check - account name
if self.name == "":
return {'status': False, 'msg': 'No account name specified'}
# read account and check that it exists
account = Account.query.filter(Account.name == self.name).first()
if not account:
return {'status': False, 'msg': 'Account does not exist'}
account.description = self.description
account.contact = self.contact
account.mail = self.mail
db.session.commit()
return {'status': True, 'msg': 'Account updated successfully'}
def delete_account(self, commit=True):
"""
Delete an account
"""
# unassociate all users first
self.grant_privileges([])
try:
Account.query.filter(Account.name == self.name).delete()
if commit:
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot delete account {0} from DB. DETAIL: {1}'.format(
self.name, e))
return False
def get_user(self):
"""
Get users (id) associated with this account
"""
user_ids = []
query = db.session.query(
AccountUser,
Account).filter(User.id == AccountUser.user_id).filter(
Account.id == AccountUser.account_id).filter(
Account.name == self.name).all()
for q in query:
user_ids.append(q[0].user_id)
return user_ids
def grant_privileges(self, new_user_list):
"""
Reconfigure account_user table
"""
account_id = self.get_id_by_name(self.name)
account_user_ids = self.get_user()
new_user_ids = [
u.id
for u in User.query.filter(User.username.in_(new_user_list)).all()
] if new_user_list else []
removed_ids = list(set(account_user_ids).difference(new_user_ids))
added_ids = list(set(new_user_ids).difference(account_user_ids))
try:
for uid in removed_ids:
AccountUser.query.filter(AccountUser.user_id == uid).filter(
AccountUser.account_id == account_id).delete()
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot revoke user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
try:
for uid in added_ids:
au = AccountUser(account_id, uid)
db.session.add(au)
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot grant user privileges to account {0}. DETAIL: {1}'.
format(self.name, e))
def revoke_privileges_by_id(self, user_id):
"""
Remove a single user from privilege list based on user_id
"""
new_uids = [u for u in self.get_user() if u != user_id]
users = []
for uid in new_uids:
users.append(User(id=uid).get_user_info_by_id().username)
self.grant_privileges(users)
def add_user(self, user):
"""
Add a single user to Account by User
"""
try:
au = AccountUser(self.id, user.id)
db.session.add(au)
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot add user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
return False
def remove_user(self, user):
"""
Remove a single user from Account by User
"""
# TODO: This func is currently used by SAML feature in a wrong way. Fix it
try:
AccountUser.query.filter(AccountUser.user_id == user.id).filter(
AccountUser.account_id == self.id).delete()
db.session.commit()
return True
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot revoke user privileges on account {0}. DETAIL: {1}'.
format(self.name, e))
return False
def update(self):
"""
Fetch accounts from PowerDNS and syncs them into DB
"""
db_accounts = Account.query.all()
list_db_accounts = [d.name for d in db_accounts]
current_app.logger.info("Found {} accounts in PowerDNS-Admin".format(
len(list_db_accounts)))
headers = {'X-API-Key': self.PDNS_API_KEY}
try:
jdata = utils.fetch_json(
urljoin(self.PDNS_STATS_URL,
self.API_EXTENDED_URL + '/servers/localhost/zones'),
headers=headers,
timeout=int(Setting().get('pdns_api_timeout')),
verify=Setting().get('verify_ssl_connections'))
list_jaccount = set(d['account'] for d in jdata if d['account'])
current_app.logger.info("Found {} accounts in PowerDNS".format(
len(list_jaccount)))
try:
# Remove accounts that don't exist any more
should_removed_db_account = list(
set(list_db_accounts).difference(list_jaccount))
for account_name in should_removed_db_account:
account_id = self.get_id_by_name(account_name)
if not account_id:
continue
current_app.logger.info("Deleting account for {0}".format(account_name))
account = Account.query.get(account_id)
account.delete_account(commit=False)
except Exception as e:
current_app.logger.error(
'Can not delete account from DB. DETAIL: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
for account_name in list_jaccount:
account_id = self.get_id_by_name(account_name)
if account_id:
continue
current_app.logger.info("Creating account for {0}".format(account_name))
account = Account(name=account_name)
db.session.add(account)
db.session.commit()
current_app.logger.info('Update accounts finished')
return {
'status': 'ok',
'msg': 'Account table has been updated successfully'
}
except Exception as e:
db.session.rollback()
current_app.logger.error(
'Cannot update account table. Error: {0}'.format(e))
return {'status': 'error', 'msg': 'Cannot update account table'}
|
StarcoderdataPython
|
1877445
|
from unittest import TestCase
from eynnyd.exceptions import RouteBuildException, NonCallableInterceptor, \
NonCallableHandler, CallbackIncorrectNumberOfParametersException
from eynnyd.routes_builder import RoutesBuilder
class TestRoutesBuilder(TestCase):
def test_add_uncallable_request_interceptor_raises(self):
builder = RoutesBuilder()
with self.assertRaises(NonCallableInterceptor):
builder.add_request_interceptor("/foo/bar", "not callable")
def test_add_too_few_param_request_interceptor_raises(self):
def test_interceptor():
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_request_interceptor("/foo/bar", test_interceptor)
def test_add_too_many_param_request_interceptor_raises(self):
def test_interceptor(one_param, two_many_param):
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_request_interceptor("/foo/bar", test_interceptor)
def test_add_request_interceptor_on_repeating_path_param_name_raises(self):
def test_interceptor(one_param):
pass
builder = RoutesBuilder()
with self.assertRaises(RouteBuildException):
builder.add_request_interceptor("/foo/{bar}/123/{bar}", test_interceptor)
def test_add_uncallable_response_interceptor_raises(self):
builder = RoutesBuilder()
with self.assertRaises(NonCallableInterceptor):
builder.add_response_interceptor("/foo/bar", "not callable")
def test_add_too_few_param_response_interceptor_raises(self):
def test_interceptor(one_param):
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_response_interceptor("/foo/bar", test_interceptor)
def test_add_too_many_param_response_interceptor_raises(self):
def test_interceptor(one_param, two_param, three_many_param):
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_response_interceptor("/foo/bar", test_interceptor)
def test_add_response_interceptor_on_repeating_path_param_name_raises(self):
def test_interceptor(one_param, two_param):
pass
builder = RoutesBuilder()
with self.assertRaises(RouteBuildException):
builder.add_response_interceptor("/foo/{bar}/123/{bar}", test_interceptor)
def test_add_uncallable_handler_raises(self):
builder = RoutesBuilder()
with self.assertRaises(NonCallableHandler):
builder.add_handler("GET", "/foo/bar", "not callable")
def test_add_too_few_param_handler_raises(self):
def test_handler():
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_handler("GET", "/foo/bar", test_handler)
def test_add_too_many_param_handler_raises(self):
def test_handler(one_param, two_param):
pass
builder = RoutesBuilder()
with self.assertRaises(CallbackIncorrectNumberOfParametersException):
builder.add_handler("GET", "/foo/bar", test_handler)
def test_add_handler_on_repeating_path_param_name_raises(self):
def test_handler(one_param):
pass
builder = RoutesBuilder()
with self.assertRaises(RouteBuildException):
builder.add_handler("GET", "/foo/{bar}/123/{bar}", test_handler)
def test_add_duplicate_route_handler_raises(self):
def test_handler(one_param):
pass
def test_another_handler(one_param):
pass
builder = RoutesBuilder()
builder.add_handler("GET", "/foo/bar", test_handler)
with self.assertRaises(RouteBuildException):
builder.add_handler("GET", "/foo/bar", test_another_handler)
|
StarcoderdataPython
|
4906534
|
<reponame>zopefoundation/zope.app.applicationcontrol
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""ZODB Control Tests
"""
import unittest
from zope.app.applicationcontrol.browser.tests import BrowserTestCase
class ErrorRedirectTest(BrowserTestCase):
def testErrorRedirect(self):
response = self.publish('/++etc++process/@@errorRedirect.html',
basic='globalmgr:globalmgrpw')
self.assertEqual('http://localhost/@@errorRedirect.html',
response.location)
self.assertEqual(302, response.status_int)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
StarcoderdataPython
|
1963278
|
food = (
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5792078",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777522",
"https://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5332821",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6529876",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5632000",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5524485",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=7200975",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8479392",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5354603",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5546868",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5386326",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5549525",
"https://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5846444",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5346385",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6367148",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5803155",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8788830",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5933633",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5772374",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5772355",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777549",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5020728",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5283946",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5423042",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5480345",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5481165",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9378503",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5653467",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5733319",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5378723",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8688402",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6563584",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5783675",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777427",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5342068",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5805179",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5326271",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5114514",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5172495",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5550695",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9378503",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6563491",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5526038",
"https://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5887969",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5724648",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5822441",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5431965",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777549",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5644187",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5652647",
)
pet = (
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5447408",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5319085",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5497010",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5904676",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9014593",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5578577",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5378917",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6246990",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5388886",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777515",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6235970",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5156182",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5701375",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8735270",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5381739",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5885451",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5368413",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5215993",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5305885",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5317405",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6318288",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5160068",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5701373",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6461783",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8773425",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5735976",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5988919",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5742996",
"https://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5460684",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5655212",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5347987",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5959379",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8736186",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8775206",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5885911",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5193610",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6340871",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5655228",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5155368",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5367669",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6330705",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5547163",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8734224",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6043606",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5845704",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6227319",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777582",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5569539",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=8775156",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6318468",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5348020",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5786497",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6318804",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5703473",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5712699",
)
history = (
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9000460",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9587339",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9807578",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9618688",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9460725",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5583982",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5453500",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5377452",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6105394",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5381939",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5601309",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5601315",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6106373",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6106235",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6106095",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6106036",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6105192",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5694732",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5601304",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5708971",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5496740",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5497496",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5505027",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5488727",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5655263",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5637277",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5639751",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5639754",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5639758",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5026435",
# "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5777645",
)
joke = (
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5228481",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5049022",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5255176",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5055370",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5051854",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5133816",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5696776",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6234755",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5256304",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5625282",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=9086838",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5369165",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5362235",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5185323",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5095183",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5277282",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5254099",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5069574",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5352001",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5052724",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6186504",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5007073",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5191910",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5780347",
)
star = (
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5716121",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5316806",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5204221",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5594444",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5417411",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5630608",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5520031",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5676640",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5386067",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5737125",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=6071863",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5058139",
"http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=5532883",
)
|
StarcoderdataPython
|
172486
|
from mesa.datacollection import DataCollector
from mesa import Model
from mesa.time import RandomActivation
from mesa_geo.geoagent import GeoAgent, AgentCreator
from mesa_geo import GeoSpace
import random
class SchellingAgent(GeoAgent):
"""Schelling segregation agent."""
def __init__(self, unique_id, model, shape, agent_type=None):
"""Create a new Schelling agent.
Args:
            unique_id: Unique identifier for the agent.
            model: Model instance the agent belongs to.
            shape: Geometry describing the agent's region.
            agent_type: Indicator for the agent's type (minority=1, majority=0)
"""
super().__init__(unique_id, model, shape)
self.atype = agent_type
def step(self):
"""Advance agent one step."""
similar = 0
different = 0
neighbors = self.model.grid.get_neighbors(self)
if neighbors:
for neighbor in neighbors:
if neighbor.atype is None:
continue
elif neighbor.atype == self.atype:
similar += 1
else:
different += 1
# If unhappy, move:
if similar < different:
# Select an empty region
empties = [a for a in self.model.grid.agents if a.atype is None]
# Switch atypes and add/remove from scheduler
new_region = random.choice(empties)
new_region.atype = self.atype
self.model.schedule.add(new_region)
self.atype = None
self.model.schedule.remove(self)
else:
self.model.happy += 1
def __repr__(self):
return "Agent " + str(self.unique_id)
class SchellingModel(Model):
"""Model class for the Schelling segregation model."""
def __init__(self, density, minority_pc):
self.density = density
self.minority_pc = minority_pc
self.schedule = RandomActivation(self)
self.grid = GeoSpace()
self.happy = 0
self.datacollector = DataCollector({"happy": "happy"})
self.running = True
# Set up the grid with patches for every NUTS region
AC = AgentCreator(SchellingAgent, {"model": self})
agents = AC.from_file("nuts_rg_60M_2013_lvl_2.geojson")
self.grid.add_agents(agents)
# Set up agents
for agent in agents:
if random.random() < self.density:
if random.random() < self.minority_pc:
agent.atype = 1
else:
agent.atype = 0
self.schedule.add(agent)
def step(self):
"""Run one step of the model.
        If all agents are happy, halt the model.
"""
self.happy = 0 # Reset counter of happy agents
self.schedule.step()
# self.datacollector.collect(self)
if self.happy == self.schedule.get_agent_count():
self.running = False
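# Added usage sketch (not part of the original module): a minimal way to drive the
# model, assuming mesa/mesa_geo are installed and the geojson file referenced above
# is present in the working directory.
if __name__ == "__main__":
    model = SchellingModel(density=0.6, minority_pc=0.2)
    for _ in range(10):
        if not model.running:
            break
        model.step()
    print("happy agents:", model.happy)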
|
StarcoderdataPython
|
1762882
|
<filename>rankers/LevenshteinRanker/tests/test_levenshteinranker.py
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina.executors.rankers import Match2DocRanker
from .. import LevenshteinRanker
def test_levenshteinranker():
queries_metas = [{'text': 'cool stuff'}, {'text': 'cool stuff'}]
old_matches_scores = [[5, 4], [5, 4]]
matches_metas = [
[{'text': 'cool stuff'}, {'text': 'kewl stuff'}],
[{'text': 'cool stuff'}, {'text': 'kewl stuff'}],
]
ranker = LevenshteinRanker()
new_scores = ranker.score(
old_matches_scores,
queries_metas,
matches_metas,
)
assert len(new_scores) == 2
assert new_scores[0] == [0, -3]
assert new_scores[1] == [0, -3]
|
StarcoderdataPython
|
4884460
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageReference(Model):
"""
The image reference.
:param publisher: Gets or sets the image publisher.
:type publisher: str
:param offer: Gets or sets the image offer.
:type offer: str
:param sku: Gets or sets the image sku.
:type sku: str
:param version: Gets or sets the image version. The allowed formats are
Major.Minor.Build or 'latest'. Major, Minor and Build being decimal
numbers. Specify 'latest' to use the latest version of image.
:type version: str
"""
_attribute_map = {
'publisher': {'key': 'publisher', 'type': 'str'},
'offer': {'key': 'offer', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, publisher=None, offer=None, sku=None, version=None):
self.publisher = publisher
self.offer = offer
self.sku = sku
self.version = version
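# Added usage sketch (illustrative only; the publisher/offer/sku values below are
# assumptions, not taken from the SDK):
if __name__ == '__main__':
    example_ref = ImageReference(publisher='Canonical', offer='UbuntuServer',
                                 sku='16.04-LTS', version='latest')
    print(example_ref.publisher, example_ref.offer, example_ref.sku, example_ref.version)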
|
StarcoderdataPython
|
3552802
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/dreamview/proto/hmi_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.monitor.proto import system_status_pb2 as modules_dot_monitor_dot_proto_dot_system__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/dreamview/proto/hmi_status.proto',
package='apollo.dreamview',
syntax='proto2',
serialized_pb=_b('\n(modules/dreamview/proto/hmi_status.proto\x12\x10\x61pollo.dreamview\x1a)modules/monitor/proto/system_status.proto\"\x8e\x01\n\tHMIStatus\x12\x33\n\rsystem_status\x18\x01 \x01(\x0b\x32\x1c.apollo.monitor.SystemStatus\x12\x13\n\x0b\x63urrent_map\x18\x02 \x01(\t\x12\x17\n\x0f\x63urrent_vehicle\x18\x03 \x01(\t\x12\x1e\n\x0c\x63urrent_mode\x18\x04 \x01(\t:\x08Standard')
,
dependencies=[modules_dot_monitor_dot_proto_dot_system__status__pb2.DESCRIPTOR,])
_HMISTATUS = _descriptor.Descriptor(
name='HMIStatus',
full_name='apollo.dreamview.HMIStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='system_status', full_name='apollo.dreamview.HMIStatus.system_status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_map', full_name='apollo.dreamview.HMIStatus.current_map', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_vehicle', full_name='apollo.dreamview.HMIStatus.current_vehicle', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_mode', full_name='apollo.dreamview.HMIStatus.current_mode', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("Standard").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=106,
serialized_end=248,
)
_HMISTATUS.fields_by_name['system_status'].message_type = modules_dot_monitor_dot_proto_dot_system__status__pb2._SYSTEMSTATUS
DESCRIPTOR.message_types_by_name['HMIStatus'] = _HMISTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HMIStatus = _reflection.GeneratedProtocolMessageType('HMIStatus', (_message.Message,), dict(
DESCRIPTOR = _HMISTATUS,
__module__ = 'modules.dreamview.proto.hmi_status_pb2'
# @@protoc_insertion_point(class_scope:apollo.dreamview.HMIStatus)
))
_sym_db.RegisterMessage(HMIStatus)
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
3330269
|
"""
Process launcher for run an ad-hoc job
"""
import yaml
import json
import datetime
import tempfile
import time
import sys
import os
from msbase.utils import getenv, datetime_str
from msbase.logging import logger
from common import get_jobs_config
from model import DB
from resource import Resource
from notif import send_text
def schedule_to_interval(sched):
if sched == "nightly":
return datetime.timedelta(days=1)
raise Exception("Unknown schedule: " + sched)
class Launcher(object):
def __init__(self):
super().__init__()
self.resources_config = yaml.safe_load(open(getenv("CONFIG_RESOURCES"), "r"))
self.db = DB()
def get_storage(self, type_: str):
for s in self.resources_config["storage"]:
if s["type"] == type_:
return s
def get_master_as_compute(self):
return self.get_compute_by_host(self.resources_config["master"]["host"])
def get_master_as_storage(self):
return self.get_storage_by_host(self.resources_config["master"]["host"])
def get_compute(self, type_: str):
# FIXME: better scheduling strategy
for s in self.resources_config["compute"]:
if s["type"] == type_:
return s
def get_compute_by_host(self, host: str):
# FIXME: better scheduling strategy
for s in self.resources_config["compute"]:
if s["host"] == host:
return s
def get_storage_by_host(self, host: str):
# FIXME: better scheduling strategy
for s in self.resources_config["storage"]:
if s["host"] == host:
return s
def create_new_job_row(self, job, compute, storage):
job_started = datetime.datetime.now()
return self.db.insert_row_get_id({
"job_name": job["name"],
"cwd": job["cwd"],
"env": json.dumps(job["env"]),
"job_steps": json.dumps(job["steps"]),
"job_persisted": json.dumps(job["persisted"]),
"job_started": job_started,
"job_status": "running",
"compute": json.dumps(compute),
"storage": json.dumps(storage)
}, "log")
# FIXME: launch job in a new temporary folder
def launch_job(self, job, compute, storage):
# prepare and send task.json
job_id = self.create_new_job_row(job, compute, storage)
task_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
task_file.write(json.dumps({
"cwd": job["cwd"],
"env": job["env"],
"steps": job["steps"]
}))
task_file.close()
resource = Resource(compute, storage)
runner_dir = resource.compute["nightly_tmp"]
resource.scp_to(task_file.name, runner_dir + "/%s-input.json" % job_id, resource.compute)
resource.scp_to("src/runner.py", runner_dir + "/runner.py", resource.compute)
resource.ssh_exec_on_node("cd " + runner_dir + "; nohup python3 runner.py %s > /dev/null 2>&1 &" % job_id, resource.compute)
try:
pid = int(resource.ssh_exec_on_node("sleep 3; cat " + runner_dir + "/" + str(job_id) + "-pid.txt", resource.compute).strip())
except Exception:
import pdb, traceback
extype, value, tb = sys.exc_info()
traceback.print_exc()
send_text("Exception -- in PDB now")
pdb.post_mortem(tb)
self.db.update_pid(job_id, pid)
# launch job and store the running PID
logger.info("Launched job (job_id: %s, PID: %s): %s" % (job_id, pid, job["name"]))
time.sleep(1)
def process_job_to_launch(self, job):
"""
Process job for further scheduling needs
"""
job_name = job["name"]
assert "enabled" in job, job
if not job["enabled"]:
logger.info(f"Job {job_name} is not enabled, thus skipped")
return
if "cwd" not in job:
job["cwd"] = None
if "env" not in job:
job["env"] = {}
if job["schedule"] in ["nightly"]:
            # Check whether we need to wait until an interval has passed since the last run of the same job finished
last_run = self.db.get_last_run_started(job)
if last_run is not None:
now = datetime.datetime.now()
interval = schedule_to_interval(job["schedule"])
if last_run + interval > now:
return None
elif job["schedule"] == "daemon":
jobs = self.db.fetch_running_jobs_of(job)
if len(jobs) > 0:
assert len(jobs) == 1
return None
elif job["schedule"] == "once":
pass
else:
raise Exception("Unknown schedule: " + job["schedule"])
storage = self.get_storage(job["storage_type"])
if storage is None:
return None
if "host" in job:
compute = self.get_compute_by_host(job["host"])
else:
compute = self.get_compute(job["compute_type"])
if compute is None:
logger.warn("Can't find compute resource for " + str(job))
return None
self.launch_job(job, compute, storage)
self.db.commit()
if __name__ == "__main__":
launcher = Launcher()
if sys.argv[1] == "--job":
job_name = sys.argv[2]
job = get_jobs_config()[job_name]
print(f"Launch job {job_name}: {job}")
launcher.process_job_to_launch(job)
exit(0)
cwd = getenv("NIGHTLY_LAUNCH_CWD")
step = sys.argv[1:]
name = "adhoc-" + datetime_str()
print(f"Launch job {name}:")
print(f"- cwd: {cwd}")
print(f"- step: {step}")
compute = launcher.get_master_as_compute() # FIXME: support other launch as well
storage = launcher.get_master_as_storage()
job = {
"name": name,
"steps": [ step ],
"schedule": "once",
"storage_type": storage["type"],
"persisted": [],
"enabled": True,
"cwd": cwd,
"env": dict(os.environ)
}
launcher.launch_job(job, compute, storage)
|
StarcoderdataPython
|
9647244
|
import sys, os
import py
from jirpa import JiraProxy, JiraProxyError
###############################################################################################
from helper_pak import BasicLogger, excErrorMessage
from jira_targets import GOOD_VANILLA_SERVER_CONFIG
from jira_targets import PROJECT_KEY_1, PROJECT_NAME_1, PROJECT_DESC_1
###############################################################################################
def test_basic_get_projects():
"""
will return a hash of project keys as keys and project names
as values with the getProjects method
"""
jp = JiraProxy(GOOD_VANILLA_SERVER_CONFIG)
projects = jp.getProjects()
assert PROJECT_KEY_1 in projects
assert projects[PROJECT_KEY_1] == "JEST Testing"
def test_get_project_details():
"""
    will return an array of project details from all projects with the getProjectsDetails method
"""
jp = JiraProxy(GOOD_VANILLA_SERVER_CONFIG)
details = jp.getProjectsDetails()
assert details
tst_details = [entry for entry in details if entry['Key'] == PROJECT_KEY_1]
assert len(tst_details) > 0
assert tst_details[0]["Name"] == PROJECT_NAME_1
assert tst_details[0]["Description"] == PROJECT_DESC_1
assert tst_details[0]["Details"]["lead"]["name"] == GOOD_VANILLA_SERVER_CONFIG['user']
def test_get_project_uses_cached_info():
"""
subsequent calls to getProjects returns the cached information
"""
jp = JiraProxy(GOOD_VANILLA_SERVER_CONFIG)
projects_initial = jp.getProjects()
projects_subsequent = jp.getProjects()
assert id(projects_initial) == id(projects_subsequent)
assert PROJECT_KEY_1 in projects_subsequent
assert projects_subsequent[PROJECT_KEY_1] == "JEST Testing"
|
StarcoderdataPython
|
5021757
|
<reponame>everarch/psets
#
# Matching Start & End
#
# https://www.hackerrank.com/challenges/matching-start-end/problem
#
Regex_Pattern = r"^\d\w{4}\.$" # Do not delete 'r'.
import re
print(str(bool(re.search(Regex_Pattern, input()))).lower())
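# Added note: the pattern requires a digit, exactly four word characters, and a
# literal trailing dot, so "1qwer." matches while "qwert." and "1qwer" do not.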
|
StarcoderdataPython
|
3350345
|
import json
import feedparser
import nltk
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
feeds = ["http://export.arxiv.org/rss/cs.AI",
"http://export.arxiv.org/rss/cs.CL",
"http://export.arxiv.org/rss/cs.CV",
"http://export.arxiv.org/rss/cs.IR",
"http://export.arxiv.org/rss/cs.LG",
"http://export.arxiv.org/rss/stat.ML",
]
def get_title(e):
return e.title.split("(arXiv:")[0].strip()
def get_summary(e):
return BeautifulSoup(e.summary, "lxml").text.replace('\n', " ").strip()
def get_authors(e):
return BeautifulSoup(e.author, "lxml").text.strip()
def clean_text(txt):
return BeautifulSoup(txt, "lxml").text.replace('\n', " ")
def process_feeds(feeds):
entries = []
for feed_url in feeds:
print(f"processing {feed_url}")
feed = feedparser.parse(feed_url)
entries.extend(feed['entries'])
papers = [
{
"id": e.id,
"title": get_title(e),
"authors": get_authors(e),
"abstract": get_summary(e),
"sentences": get_sentences(e)
} for e in entries
]
return papers
def dump_jsonl(records, fname):
with open(fname, "w") as fout:
for r in records:
fout.write(json.dumps(r))
fout.write("\n")
def get_sentences(e):
# title as first sentence
title = get_title(e)
# sentences from summary
summary = clean_text(e.summary)
sentences = sent_tokenize(summary)
return [title] + sentences
def get_sentences_from_txt(txt):
return sent_tokenize(txt)
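# Added usage sketch: fetch the feeds listed above and write one JSON line per
# paper. Requires network access; the output file name is an assumption.
if __name__ == "__main__":
    papers = process_feeds(feeds)
    dump_jsonl(papers, "arxiv_papers.jsonl")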
|
StarcoderdataPython
|
3267921
|
class MaximumNumberOfOccupantsReached(Exception):
pass
class Tenant:
def __init__(self, first_name: str, last_name: str, student_id_number: int) -> None:
self.first_name = first_name
self.last_name = last_name
self.student_id_number = student_id_number
@property
def full_name(self) -> str:
        return f"{self.first_name} {self.last_name}"
class Room:
def __init__(self, room_number: str, max_capacity: int) -> None:
self.room_number = room_number
self.max_capacity = max_capacity
self.occupants = set()
@property
def number_of_occupants(self) -> int:
return len(self.occupants)
def add_occupant(self, occupant: Tenant):
if self.can_add_more_occupants:
self.occupants.add(occupant)
else:
raise MaximumNumberOfOccupantsReached
@property
def can_add_more_occupants(self) -> bool:
return self.number_of_occupants < self.max_capacity
def assign_to_room(tenant: Tenant, room: Room):
try:
room.add_occupant(tenant)
except MaximumNumberOfOccupantsReached:
raise MaximumNumberOfOccupantsReached(
f"Can no longer add {tenant.full_name} to Room {room.room_number}."
)
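# Added usage sketch (the tenant names and room data are made up for illustration):
if __name__ == "__main__":
    room = Room(room_number="101", max_capacity=2)
    assign_to_room(Tenant("Ada", "Lovelace", 1), room)
    assign_to_room(Tenant("Alan", "Turing", 2), room)
    print(room.number_of_occupants)  # 2; a third assignment would raise MaximumNumberOfOccupantsReached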
|
StarcoderdataPython
|
3344286
|
from django.conf.urls import include, url
from corehq.apps.app_manager.views import (
AppCaseSummaryView,
AppDataView,
AppDiffView,
AppFormSummaryView,
DownloadAppSummaryView,
DownloadCaseSummaryView,
DownloadCCZ,
DownloadFormSummaryView,
FormHasSubmissionsView,
FormSummaryDiffView,
LanguageProfilesView,
PromptSettingsUpdateView,
app_exchange,
app_from_template,
app_settings,
app_source,
commcare_profile,
compare_multimedia_sizes,
copy_app,
copy_form,
current_app_version,
default_new_app,
delete_app,
delete_copy,
delete_form,
delete_module,
direct_ccz,
download_file,
download_index,
drop_user_case,
edit_add_ons,
edit_advanced_form_actions,
edit_app_attr,
edit_app_langs,
edit_app_ui_translations,
edit_commcare_profile,
edit_commcare_settings,
edit_form_actions,
edit_form_attr,
edit_form_attr_api,
edit_module_attr,
edit_module_detail_screens,
edit_report_module,
edit_schedule_phases,
edit_visit_schedule,
form_source,
form_source_legacy,
get_app_ui_translations,
get_form_data_schema,
get_form_datums,
get_form_questions,
get_multimedia_sizes,
get_xform_source,
import_app,
list_apps,
multimedia_ajax,
new_app,
new_form,
new_module,
odk_install,
odk_media_qr_code,
odk_qr_code,
overwrite_module_case_list,
paginate_releases,
patch_xform,
pull_master_app,
rearrange,
release_build,
revert_to_copy,
save_copy,
short_odk_url,
short_url,
toggle_build_profile,
undo_delete_app,
undo_delete_form,
undo_delete_module,
update_build_comment,
update_linked_whitelist,
upgrade_shadow_module,
validate_form_for_build,
validate_module_for_build,
view_app,
view_form,
view_form_legacy,
view_module,
view_module_legacy,
)
from corehq.apps.app_manager.views.apps import move_child_modules_after_parents
from corehq.apps.app_manager.views.modules import ExistingCaseTypesView
from corehq.apps.hqmedia.views import copy_multimedia
from corehq.apps.hqmedia.urls import application_urls as hqmedia_urls
from corehq.apps.hqmedia.urls import download_urls as media_download_urls
from corehq.apps.linked_domain.views import pull_missing_multimedia
from corehq.apps.translations.views import (
download_bulk_app_translations,
download_bulk_ui_translations,
upload_bulk_app_translations,
upload_bulk_ui_translations,
)
app_urls = [
url(r'^languages/$', view_app, name='app_languages'),
url(r'^languages/translations/download/$', download_bulk_ui_translations, name='download_bulk_ui_translations'),
url(r'^languages/translations/upload/$', upload_bulk_ui_translations, name='upload_bulk_ui_translations'),
url(r'^languages/bulk_app_translations/download/$', download_bulk_app_translations, name='download_bulk_app_translations'),
url(r'^languages/bulk_app_translations/upload/$', upload_bulk_app_translations, name='upload_bulk_app_translations'),
url(r'^multimedia_ajax/$', multimedia_ajax, name='app_multimedia_ajax'),
url(r'^multimedia_sizes/$', get_multimedia_sizes, name='get_multimedia_sizes'),
url(r'^multimedia_sizes/(?P<build_profile_id>[\w-]+)/$', get_multimedia_sizes,
name='get_multimedia_sizes_for_build_profile'),
url(r'^compare_multimedia_sizes/(?P<other_build_id>[\w-]+)/$',
compare_multimedia_sizes, name='compare_multimedia_sizes'),
url(r'^compare_multimedia_sizes/(?P<other_build_id>[\w-]+)/(?P<build_profile_id>[\w-]+)/$',
compare_multimedia_sizes, name='compare_multimedia_sizes_for_build_profile'),
url(r'^$', view_app, name='view_app'),
url(r'^releases/$', view_app, name='release_manager'),
url(r'^settings/$', app_settings, name='app_settings'),
url(r'^add_ons/edit/$', edit_add_ons, name='edit_add_ons'),
url(r'^current_version/$', current_app_version, name='current_app_version'),
url(r'^releases/json/$', paginate_releases, name='paginate_releases'),
url(r'^releases/release/(?P<saved_app_id>[\w-]+)/$', release_build,
name='release_build'),
url(r'^releases/unrelease/(?P<saved_app_id>[\w-]+)/$', release_build,
name='unrelease_build', kwargs={'is_released': False}),
url(r'^releases/profiles/$', LanguageProfilesView.as_view(), name=LanguageProfilesView.urlname),
url(r'^modules-(?P<module_id>[\w-]+)/$', view_module_legacy,
name='view_module_legacy'),
url(r'^module/(?P<module_unique_id>[\w-]+)/$', view_module, name='view_module'),
url(r'^modules-(?P<module_id>[\w-]+)/forms-(?P<form_id>[\w-]+)/$',
view_form_legacy, name='view_form_legacy'),
url(r'^form/(?P<form_unique_id>[\w-]+)/$', view_form, name='view_form'),
url(r'^get_form_datums/$', get_form_datums, name='get_form_datums'),
url(r'^get_form_questions/$', get_form_questions, name='get_form_questions'),
url(r'^form/(?P<form_unique_id>[\w-]+)/source/$', form_source, name='form_source'),
url(r'^modules-(?P<module_id>[\w-]+)/forms-(?P<form_id>[\w-]+)/source/$',
form_source_legacy, name='form_source_legacy'),
url(r'^app_data/$', AppDataView.as_view(), name=AppDataView.urlname),
url(r'^summary/$', AppFormSummaryView.as_view(), name=AppFormSummaryView.urlname),
url(r'^summary/case/$', AppCaseSummaryView.as_view(), name=AppCaseSummaryView.urlname),
url(r'^summary/form/$', AppFormSummaryView.as_view(), name=AppFormSummaryView.urlname),
url(r'^summary/case/download/$', DownloadCaseSummaryView.as_view(), name=DownloadCaseSummaryView.urlname),
url(r'^summary/form/download/$', DownloadFormSummaryView.as_view(), name=DownloadFormSummaryView.urlname),
url(r'^summary/app/download/$', DownloadAppSummaryView.as_view(), name=DownloadAppSummaryView.urlname),
url(r'^update_build_comment/$', update_build_comment,
name='update_build_comment'),
url(r'^update_prompts/$', PromptSettingsUpdateView.as_view(), name=PromptSettingsUpdateView.urlname),
url(r'^form_has_submissions/(?P<form_unique_id>[\w-]+)/$', FormHasSubmissionsView.as_view(),
name=FormHasSubmissionsView.urlname),
]
urlpatterns = [
url(r'^browse/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/source/$',
get_xform_source, name='get_xform_source'),
url(r'^source/(?P<app_id>[\w-]+)/$', app_source, name='app_source'),
url(r'^app_exchange/$', app_exchange, name='app_exchange'),
url(r'^import_app/$', import_app, name='import_app'),
url(r'^app_from_template/(?P<slug>[\w-]+)/$', app_from_template, name='app_from_template'),
url(r'^copy_app/$', copy_app, name='copy_app'),
url(r'^view/(?P<app_id>[\w-]+)/', include(app_urls)),
url(r'^compare/(?P<first_app_id>[\w-]+)..(?P<second_app_id>[\w-]+)',
FormSummaryDiffView.as_view(), name=FormSummaryDiffView.urlname),
url(r'^schema/(?P<app_id>[\w-]+)/form/(?P<form_unique_id>[\w-]+)/$',
get_form_data_schema, name='get_form_data_schema'),
url(r'^new_module/(?P<app_id>[\w-]+)/$', new_module, name='new_module'),
url(r'^new_app/$', new_app, name='new_app'),
url(r'^default_new_app/$', default_new_app, name='default_new_app'),
url(r'^new_form/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
new_form, name='new_form'),
url(r'^drop_user_case/(?P<app_id>[\w-]+)/$', drop_user_case, name='drop_user_case'),
url(r'^pull_master/(?P<app_id>[\w-]+)/$', pull_master_app, name='pull_master_app'),
url(r'^pull_missing_multimedia/(?P<app_id>[\w-]+)/$', pull_missing_multimedia,
name='pull_missing_multimedia'),
url(r'^linked_whitelist/(?P<app_id>[\w-]+)/$', update_linked_whitelist, name='update_linked_whitelist'),
url(r'^delete_app/(?P<app_id>[\w-]+)/$', delete_app, name='delete_app'),
url(r'^delete_module/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
delete_module, name="delete_module"),
url(r'^delete_form/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
delete_form, name="delete_form"),
url(r'^overwrite_module_case_list/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
overwrite_module_case_list, name='overwrite_module_case_list'),
url(r'^copy_form/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$', copy_form, name='copy_form'),
url(r'^undo_delete_app/(?P<record_id>[\w-]+)/$', undo_delete_app,
name='undo_delete_app'),
url(r'^undo_delete_module/$', undo_delete_module,
name='undo_delete_module'),
url(r'^undo_delete_form/(?P<record_id>[\w-]+)/$', undo_delete_form,
name='undo_delete_form'),
url(r'^edit_form_attr/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/(?P<attr>[\w-]+)/$',
edit_form_attr, name='edit_form_attr'),
url(r'^edit_form_attr_api/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/(?P<attr>[\w-]+)/$',
edit_form_attr_api, name='edit_form_attr_api'),
url(r'^patch_xform/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
patch_xform, name='patch_xform'),
url(r'^validate_form_for_build/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
validate_form_for_build, name='validate_form_for_build'),
url(r'^edit_form_actions/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
edit_form_actions, name='edit_form_actions'),
url(r'^edit_advanced_form_actions/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
edit_advanced_form_actions, name='edit_advanced_form_actions'),
# Scheduler Modules
url(r'^edit_visit_schedule/(?P<app_id>[\w-]+)/(?P<form_unique_id>[\w-]+)/$',
edit_visit_schedule, name='edit_visit_schedule'),
url(r'^edit_schedule_phases/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
edit_schedule_phases,
name='edit_schedule_phases'),
# multimedia stuff
url(r'^(?P<app_id>[\w-]+)/multimedia/', include(hqmedia_urls)),
url(r'^copy_multimedia/(?P<app_id>[\w-]+)/$', copy_multimedia, name='copy_multimedia'),
url(r'^edit_module_detail_screens/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
edit_module_detail_screens, name='edit_module_detail_screens'),
url(r'^edit_module_attr/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/(?P<attr>[\w-]+)/$',
edit_module_attr, name='edit_module_attr'),
url(r'^edit_report_module/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
edit_report_module, name='edit_report_module'),
url(r'^validate_module_for_build/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
validate_module_for_build, name='validate_module_for_build'),
url(r'^commcare_profile/(?P<app_id>[\w-]+)/$', commcare_profile, name='commcare_profile'),
url(r'^edit_commcare_profile/(?P<app_id>[\w-]+)/$', edit_commcare_profile,
name='edit_commcare_profile'),
url(r'^edit_commcare_settings/(?P<app_id>[\w-]+)/$',
edit_commcare_settings, name='edit_commcare_settings'),
url(r'^edit_app_langs/(?P<app_id>[\w-]+)/$', edit_app_langs,
name='edit_app_langs'),
url(r'^edit_app_attr/(?P<app_id>[\w-]+)/(?P<attr>[\w-]+)/$',
edit_app_attr, name='edit_app_attr'),
url(r'^edit_app_ui_translations/(?P<app_id>[\w-]+)/$', edit_app_ui_translations,
name='edit_app_ui_translations'),
url(r'^get_app_ui_translations/$', get_app_ui_translations, name='get_app_ui_translations'),
url(r'^rearrange/(?P<app_id>[\w-]+)/(?P<key>[\w-]+)/$', rearrange, name='rearrange'),
url(r'^move_child_modules_after_parents/(?P<app_id>[\w-]+)/$', move_child_modules_after_parents,
name='move_child_modules_after_parents'),
url(r'^upgrade_shadow_module/(?P<app_id>[\w-]+)/(?P<module_unique_id>[\w-]+)/$',
upgrade_shadow_module, name='upgrade_shadow_module'),
url(r'^odk/(?P<app_id>[\w-]+)/qr_code/$', odk_qr_code, name='odk_qr_code'),
url(r'^odk/(?P<app_id>[\w-]+)/media_qr_code/$', odk_media_qr_code, name='odk_media_qr_code'),
url(r'^odk/(?P<app_id>[\w-]+)/install/$', odk_install, name="odk_install"),
url(r'^odk/(?P<app_id>[\w-]+)/media_install/$', odk_install, {'with_media': True}, name="odk_media_install"),
url(r'^odk/(?P<app_id>[\w-]+)/short_url/$', short_url, name='short_url'),
url(r'^odk/(?P<app_id>[\w-]+)/short_odk_media_url/$', short_odk_url, {'with_media': True}),
url(r'^odk/(?P<app_id>[\w-]+)/short_odk_url/$', short_odk_url),
url(r'^save/(?P<app_id>[\w-]+)/$', save_copy, name='save_copy'),
url(r'^revert/(?P<app_id>[\w-]+)/$', revert_to_copy, name='revert_to_copy'),
url(r'^delete_copy/(?P<app_id>[\w-]+)/$', delete_copy, name='delete_copy'),
url(r'^api/list_apps/$', list_apps, name='list_apps'),
url(r'^api/download_ccz/$', direct_ccz, name='direct_ccz'),
url(r'^download/(?P<app_id>[\w-]+)/$', download_index, name='download_index'),
url(r'^build_profile/(?P<build_id>[\w-]+)/toggle/(?P<build_profile_id>[\w-]+)$', toggle_build_profile,
name='toggle_build_profile'),
# the order of these download urls is important
url(r'^download/(?P<app_id>[\w-]+)/CommCare.ccz$', DownloadCCZ.as_view(),
name=DownloadCCZ.name),
url(r'^download/(?P<app_id>[\w-]+)/multimedia/', include(media_download_urls)),
url(r'^download/(?P<app_id>[\w-]+)/(?P<path>.*)$', download_file,
name='app_download_file'),
url(r'^download/(?P<app_id>[\w-]+)/',
include('corehq.apps.app_manager.download_urls')),
url(r'^diff/(?P<first_app_id>[\w-]+)/(?P<second_app_id>[\w-]+)/$', AppDiffView.as_view(), name=AppDiffView.urlname),
url(r'existing_case_types', ExistingCaseTypesView.as_view(), name=ExistingCaseTypesView.urlname),
url(r'^', include('custom.ucla.urls')),
]
|
StarcoderdataPython
|
1763718
|
<filename>tests/lspopt_ref.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`lspopt_ref`
==================
.. module:: lspopt_ref
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2015-11-13
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import numpy as np
from lspopt.data import C, WEIGHTS
def lspopt_ref(n, c_parameter=20.0):
"""Reference implementation of the multitaper window calculation.
A direct port of Matlab code obtained from the author of the paper.
Parameters
----------
n : int
Length of multitaper windows
c_parameter : float
The parameter `c` in [1]. Default is 20.0
Returns
-------
H : ndarray
Multitaper windows, of size [n x n]
w : ndarray
Array of taper window weights.
References
----------
...[1] <NAME>. (2011). Optimal multitaper Wigner spectrum
estimation of a class of locally stationary processes using Hermite functions.
EURASIP Journal on Advances in Signal Processing, 2011, 10.
"""
c = C
Wmat = WEIGHTS
k = int(round((c_parameter - 1) * 10))
c = c[k]
wei = Wmat[:, k]
wei = wei[np.nonzero(wei)]
K = len(wei)
if K > 10:
K = 10
wei = wei[:K]
wei /= np.sum(wei)
t1 = np.arange(-(n / 2) + 1, (n / 2) + 0.1, step=1.0) / _get_f1(n, K)
h = np.ones((n,))
if K > 1:
h = np.vstack((h, 2 * t1))
if K > 2:
for i in range(1, K - 1):
h = np.vstack((h, (2 * t1 * h.T[:, i]) - 2 * i * h.T[:, i - 1]))
H = h.T * np.outer(np.exp(-(t1 ** 2) / 2), np.ones((K,), "float"))
for i in six.moves.range(K):
H[:, i] = H[:, i] / np.sqrt(H[:, i].T.dot(H[:, i])) # Norm
return H.T, wei
def _get_f1(N, K):
if K == 1:
return N / 5.4
if K == 2:
return N / 6.
if K == 3:
return N / 7.3
if K == 4:
return N / 8.1
if K == 5:
return N / 8.7
if K == 6:
return N / 9.3
if K == 7:
return N / 9.8
if K == 8:
return N / 10.3
if K == 9:
return N / 10.9
if K == 10:
return N / 11.2
raise ValueError("K was not in [1, 10]!")
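# Added usage sketch: build the multitaper windows for a 256-sample segment and
# inspect their shapes (assumes the lspopt package data imported above is available).
if __name__ == "__main__":
    H, weights = lspopt_ref(256, c_parameter=20.0)
    print(H.shape, weights.shape)  # (K, 256) and (K,), with K at most 10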
|
StarcoderdataPython
|
3416652
|
<gh_stars>0
# Copyright 2020 <NAME> (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from ..configs.config import RunningConfig
from ..featurizers.text_featurizers import TextFeaturizer
from ..losses.ctc_losses import ctc_loss
from .base_runners import BaseTrainer
from ..optimizers.accumulation import GradientAccumulation
class CTCTrainer(BaseTrainer):
""" Trainer for CTC Models """
def __init__(self,
text_featurizer: TextFeaturizer,
config: RunningConfig,
strategy: tf.distribute.Strategy = None):
self.text_featurizer = text_featurizer
super(CTCTrainer, self).__init__(config=config, strategy=strategy)
def set_train_metrics(self):
self.train_metrics = {
"ctc_loss": tf.keras.metrics.Mean("train_ctc_loss", dtype=tf.float32)
}
def set_eval_metrics(self):
self.eval_metrics = {
"ctc_loss": tf.keras.metrics.Mean("eval_ctc_loss", dtype=tf.float32),
}
def save_model_weights(self):
with self.strategy.scope():
self.model.save_weights(os.path.join(self.config.outdir, "latest.h5"))
@tf.function(experimental_relax_shapes=True)
def _train_step(self, batch):
_, features, input_length, labels, label_length, _ = batch
with tf.GradientTape() as tape:
y_pred = self.model(features, training=True)
tape.watch(y_pred)
per_train_loss = ctc_loss(
y_true=labels, y_pred=y_pred,
input_length=(input_length // self.model.time_reduction_factor),
label_length=label_length,
blank=self.text_featurizer.blank
)
train_loss = tf.nn.compute_average_loss(per_train_loss,
global_batch_size=self.global_batch_size)
gradients = tape.gradient(train_loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
self.train_metrics["ctc_loss"].update_state(per_train_loss)
@tf.function(experimental_relax_shapes=True)
def _eval_step(self, batch):
_, features, input_length, labels, label_length, _ = batch
logits = self.model(features, training=False)
per_eval_loss = ctc_loss(
y_true=labels, y_pred=logits,
input_length=(input_length // self.model.time_reduction_factor),
label_length=label_length,
blank=self.text_featurizer.blank
)
# Update metrics
self.eval_metrics["ctc_loss"].update_state(per_eval_loss)
def compile(self, model: tf.keras.Model,
optimizer: any,
max_to_keep: int = 10):
with self.strategy.scope():
self.model = model
self.optimizer = tf.keras.optimizers.get(optimizer)
self.create_checkpoint_manager(max_to_keep, model=self.model, optimizer=self.optimizer)
class CTCTrainerGA(CTCTrainer):
""" Trainer for CTC Models """
@tf.function
def _train_function(self, iterator):
for _ in range(self.config.accumulation_steps):
batch = next(iterator)
self.strategy.run(self._train_step, args=(batch,))
self.strategy.run(self._apply_gradients, args=())
@tf.function
def _apply_gradients(self):
self.optimizer.apply_gradients(
zip(self.accumulation.gradients, self.model.trainable_variables))
self.accumulation.reset()
@tf.function(experimental_relax_shapes=True)
def _train_step(self, batch):
_, features, input_length, labels, label_length, _ = batch
with tf.GradientTape() as tape:
y_pred = self.model(features, training=True)
tape.watch(y_pred)
per_train_loss = ctc_loss(
y_true=labels, y_pred=y_pred,
input_length=(input_length // self.model.time_reduction_factor),
label_length=label_length,
blank=self.text_featurizer.blank
)
train_loss = tf.nn.compute_average_loss(per_train_loss,
global_batch_size=self.global_batch_size)
gradients = tape.gradient(train_loss, self.model.trainable_variables)
self.accumulation.accumulate(gradients)
self.train_metrics["ctc_loss"].update_state(per_train_loss)
def compile(self, model: tf.keras.Model,
optimizer: any,
max_to_keep: int = 10):
with self.strategy.scope():
self.model = model
self.optimizer = tf.keras.optimizers.get(optimizer)
self.create_checkpoint_manager(max_to_keep, model=self.model, optimizer=self.optimizer)
self.accumulation = GradientAccumulation(self.model.trainable_variables)
|
StarcoderdataPython
|
1962440
|
<reponame>JuanCruzMedina/betterpros
from typing import Optional
from pydantic import BaseModel
class UserOut(BaseModel): # serializer
user_id: int
user_name: str
email: str
last_conversation_id: Optional[str] = None
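# Added usage sketch (field values are illustrative; assumes a pydantic v1-style API):
if __name__ == "__main__":
    user = UserOut(user_id=1, user_name="jane", email="jane@example.com")
    print(user.dict())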
|
StarcoderdataPython
|
8093094
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 1 10:55:12 2020
pj: Siteblocker
@author: Hyu1
"""
#Before run this, make sure you run the program to adminstrator (to access to host file)
#Import time.
#import mysql.
import mysql.connector as mysql
db = mysql.connect (
host = "localhost",
user = "root",
passwd = "<PASSWORD>"
)
cursor = db.cursor()
cursor.execute("SELECT 1")  # placeholder query; the connection is not used further in this script
#Rename datetime as dt.
from datetime import datetime as dt
#Enter directory of the host file. This is for Windows. May different depens on your OS.
hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
#Enter local ip or the ip of the site you want to redirect when the blocked site is opened.
redirect = "127.0.0.1"
#Ask users how many sites they want to block at a time.
#Run it as a loop.
#Append the values in a list.
siteNum = int(input("How many sites you want to block: "))
website_list = []
for i in range(1, siteNum + 1):
    website = input("Enter the site url to block: ")
    website_list.append(website)
#Block the sites for specific timing in a day.
#Ask the users for starting and ending time.
startTime = int(input("Enter the starting time to block the site (in 24hr): "))
endTime = int(input("Enter the end time to block the site (in 24hr): "))
#Initiate an infinite loop to check for the time.
while True:
#In blocking hours.
if dt(dt.now().year, dt.now().month, dt.now().day, startTime) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, endTime):
#Open host file.
with open(hosts_path, 'r+') as file:
#Read the file content.
content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    #Write the redirect ip + space + website url.
                    file.write(redirect + " " + website + "\n")
#In free hours.
else:
#Remove the blocked site url.
#Open the host file.
with open (hosts_path, 'r+') as file:
content = file.readlines()
file.seek(0)
for line in content:
if not any (website in line for website in website_list):
file.write(line)
file.truncate()
|
StarcoderdataPython
|
11358873
|
import numpy as np
import re
import random
import collections
from string import punctuation
add_punc=',。、【 】 “”:;()《》‘’{}?!⑦()、%^>℃:.”“^-——=&#@¥'
all_punc=punctuation+add_punc
# Text preprocessing
class PreProcess(object):
"""
    1. Read the data
    2. Build the vocabulary
    3. Provide accessor methods
"""
def __init__(self,preparams):
self.sentences = []
self.word = []
self.filename = preparams["filename"]
self.maxWinSize = preparams["maxWinSize"]
self.negSampNumK = preparams["negSampNumK"]
self.idx2token = None
self.token2idx = None
self.dataset = None
self.centers = None
self.contexts = None
self.negatives = None
self.tokenNum = 0
self.read_data()
self.build_data()
self.get_center_and_contexts()
self.get_negative_samples()
def read_data(self):
assert self.filename != "", "filename could not be empty!"
        # Read the corpus
with open(self.filename, 'r', encoding='UTF-8-sig') as f:
while True:
line = f.readline()
if line == None or line == "":
break
                # Strip newline characters and surrounding whitespace
line = line.strip()
line = re.sub(r"[0-9\s+\.\!\/_,$%^*()?;;:-【】+\"\']+|[+—!,;:。?、~@#¥%……&*()]+", " ", line)
if line != "":
seg = re.split(" ",line)
if seg[0] != '<':
res = [s for s in seg if s != "" and s not in all_punc]
if len(res) >= 3:
self.sentences.append(res)
def build_data(self):
        # Compute the probability that a word is discarded (sub-sampling of frequent words)
def disp(idx):
return np.random.uniform(0,1) < 1 - np.sqrt(1e-4 / self.counter[self.idx2token[idx]] * self.tokenNum)
        # Extract words from the sentences that were read
self.word = [w for s in self.sentences for w in s]
        # Count word frequencies
self.counter = collections.Counter(self.word)
        # Filter out low-frequency words; keep words with a count >= 5
self.counter = dict(filter(
lambda x:x[1] >= 5, self.counter.items()
))
        # Build the index-to-token table
self.idx2token = [c for c, _ in self.counter.items()]
        # Build the token-to-index table
self.token2idx = {c:idx for idx, c in enumerate(self.idx2token)}
# self.dataset = [[self.token2idx[t] for t in s if t in self.token2idx] for s in self.sentences]
self.tokenNum = len(self.idx2token)
# self.subset = [[t for t in s if not disp(t)] for s in self.dataset]
        # Token lists after discarding (sub-sampling) high-frequency words
self.subset = [[self.token2idx[t] for t in s if t in self.token2idx and not disp(self.token2idx[t])] for s in self.sentences]
        # Update the vocabulary size
self.tokenNum = sum([len(s) for s in self.subset])
def get_center_and_contexts(self):
"""
        Collect the center words and their corresponding context words
"""
self.centers, self.contexts = [],[]
for s in self.subset:
if len(s) < 2:
continue
self.centers += s
for idx in range(len(s)):
winSize = np.random.randint(1, self.maxWinSize)
idxs = list(range(max(0, idx - winSize), min(len(s), idx + 1 + winSize)))
idxs.remove(idx)
self.contexts.append([s[idx] for idx in idxs])
# for center, context in zip(self.centers, self.contexts):
# print('center', center, 'has contexts', context)
def get_negative_samples(self):
"""
        Negative sampling.
        Following the word2vec paper, each word's sampling weight is set to its frequency raised to the power 0.75.
"""
self.negatives, neg_tmp = [], []
weight = [self.counter[w] ** 0.75 for w in self.idx2token]
i = 0
p = list(range(len(weight)))
for context in self.contexts:
negs = []
while len(negs) < len(context) * self.negSampNumK:
if i == len(neg_tmp):
i = 0
neg_tmp = random.choices(p, weight, k=int(1e5))
neg = neg_tmp[i]
i += 1
                # A negatively sampled word must not appear among the context words
if neg not in set(context):
negs.append(neg)
self.negatives.append(negs)
# print(self.negatives)
    # Accessor methods for the corresponding attributes
def get_num(self):
return self.tokenNum
def get_data(self):
return self.centers, self.contexts, self.negatives
def get_idx2token(self):
return self.idx2token
def get_token2idx(self):
return self.token2idx
def __repr__(self):
return "PreProcess()"
# Word vector training
class Word2Vec(object):
"""
    An implementation of word2vec.
    Input parameters: word_size is the vocabulary size, embdding_size is the embedding dimension, eta is the learning rate (step size), n_iters is the number of iterations.
    Uses gradient ascent.
"""
def __init__(self, w2vparams):
self.word_size = w2vparams["word_size"]
self.embdding_size = w2vparams["embdding_size"]
self.w = np.random.normal(loc=0,scale=1,size=(self.word_size,self.embdding_size))
self.theta = np.random.normal(loc=0,scale=1,size=(self.word_size,self.embdding_size))
def sigmoid(self, x):
return 1. / (1. + np.exp(-x))
def fit(self, center, context, negative, eta=0.1, n_iters=10):
"""
        Training function.
        The inputs are the center word, the context words, and the negatively sampled words.
"""
y = np.zeros((len(negative)))
cur_iter = 0
while cur_iter < n_iters:
for i in range(len(context)):
e = 0
for j in range(len(negative)):
y_hat = self.sigmoid(np.dot(self.w[context[i]].reshape(1,-1), self.theta[negative[j]].reshape(-1,1)))
g = (y[j] - y_hat) * eta
e += g[0][0] * self.theta[negative[j]]
self.theta[negative[j]] += g[0][0] * self.w[context[i]]
# print("e",e)
# break
self.w[context[i]] += e
cur_iter += 1
# print("one fit done.")
    # Return the word vectors
def get_w(self):
return self.w
def __repr__(self):
return "Word2Vec()"
def main():
    # Read the text and build the vocabulary
preparams = {"filename":"two.txt","maxWinSize":2,"negSampNumK":4}
preProcess = PreProcess(preparams)
idx2token = preProcess.get_idx2token()
    # Initialize the word vectors
w2vparams = {"word_size":len(idx2token),"embdding_size":100}
w2v = Word2Vec(w2vparams)
centers, contexts, negatives = preProcess.get_data()
assert len(contexts) == len(centers) and len(contexts) == len(negatives)
    # Train
    for i in range(len(centers)):
        w2v.fit(centers[i], contexts[i], negatives[i])
        print(i + 1, '/', len(centers), 'done.')
w2v_w = w2v.get_w()
    # Save the word vectors
    np.save('w.npy', w2v_w)
    print("w.shape", w2v_w.shape)
    # Save the vocabulary
    np.save('idx2token.npy', idx2token)
    print("done.")
if __name__ == '__main__':
main()
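# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Once main() has written w.npy and idx2token.npy, the embeddings can be queried with
# a simple cosine-similarity lookup. The query word and top-k value are placeholders.
def most_similar(query, topk=5):
    w = np.load('w.npy')
    idx2token_loaded = list(np.load('idx2token.npy', allow_pickle=True))
    token2idx = {t: i for i, t in enumerate(idx2token_loaded)}
    if query not in token2idx:
        raise KeyError(f'{query!r} is not in the vocabulary')
    v = w[token2idx[query]]
    # Cosine similarity between the query vector and every embedding row.
    sims = w @ v / (np.linalg.norm(w, axis=1) * np.linalg.norm(v) + 1e-9)
    best = np.argsort(-sims)[1:topk + 1]  # skip the query word itself
    return [(idx2token_loaded[i], float(sims[i])) for i in best]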
|
StarcoderdataPython
|
11362879
|
# Euclid's algorithm for computing the GCD
# (iterative, subtraction-based)
x = int(input("Enter X "))
y = int(input("Enter Y "))
while x != y:
    if x > y:
        x = x - y
    else:
        y = y - x
print("GCD = ", x)
|
StarcoderdataPython
|
1633858
|
<reponame>WorldWideTelescope/pywwt-web<filename>pywwt/utils.py
import numpy as np
import pytz
from astropy.io import fits
from astropy.coordinates import ICRS
from astropy.time import Time
from datetime import datetime
from reproject import reproject_interp
from reproject.mosaicking import find_optimal_celestial_wcs
__all__ = ["sanitize_image"]
def sanitize_image(image, output_file, overwrite=False, hdu_index=None, **kwargs):
"""
Transform a FITS image so that it is in equatorial coordinates with a TAN
projection and floating-point values, all of which are required to work
correctly in WWT at the moment.
Image can be a filename, an HDU, or a tuple of (array, WCS).
"""
# In case of a FITS file with more than one HDU, we need to choose one
if isinstance(image, str):
with fits.open(image) as hdul:
if hdu_index is not None:
image = hdul[hdu_index]
else:
for hdu in hdul:
if (
hasattr(hdu, "shape")
and len(hdu.shape) > 1
and type(hdu) is not fits.hdu.table.BinTableHDU
):
break
image = hdu
transform_to_wwt_supported_fits(image, output_file, overwrite)
else:
transform_to_wwt_supported_fits(image, output_file, overwrite)
def transform_to_wwt_supported_fits(image, output_file, overwrite):
# Workaround because `reproject` currently only accepts 2D inputs. This is a
# hack and it would be better to update reproject to do this processing.
# Also, this logic is copy/pasting `toasty.collection.SimpleFitsCollection`.
import warnings
from reproject.utils import parse_input_data
with warnings.catch_warnings():
# Sorry, Astropy, no one cares if you fixed the FITS.
warnings.simplefilter("ignore")
data, wcs = parse_input_data(image)
if wcs.naxis != 2:
if not wcs.has_celestial:
raise Exception(
f"cannot process input `{image}`: WCS cannot be reduced to 2D celestial"
)
full_wcs = wcs
wcs = full_wcs.celestial
# note: get_axis_types returns axes in FITS order, innermost first
keep_axes = [
t.get("coordinate_type") == "celestial"
for t in full_wcs.get_axis_types()[::-1]
]
for axnum, (keep, axlen) in enumerate(zip(keep_axes, data.shape)):
if not keep and axlen != 1:
# This is a non-celestial axis that we need to drop, but its
# size is not one. So in principle the user should tell us which
                # plane to choose. We can't do that here, so just complain --
# that's better than giving a hard error since this way the user
# can at least see *something*.
warnings.warn(
f"taking first plane (out of {axlen}) in non-celestial image axis #{axnum} in input `{image}`"
)
data = data[tuple(slice(None) if k else 0 for k in keep_axes)]
image = (data, wcs)
# End workaround.
wcs, shape_out = find_optimal_celestial_wcs([image], frame=ICRS(), projection="TAN")
array = reproject_interp(image, wcs, shape_out=shape_out, return_footprint=False)
fits.writeto(
output_file, array.astype(np.float32), wcs.to_header(), overwrite=overwrite
)
def validate_traits(cls, traits):
"""
Helper function to ensure user-provided trait names match those of the
class they're being used to instantiate.
"""
mismatch = [key for key in traits if key not in cls.trait_names()]
if mismatch:
raise KeyError(
"Key{0} {1} do{2}n't match any layer trait name".format(
"s" if len(mismatch) > 1 else "",
mismatch,
"" if len(mismatch) > 1 else "es",
)
)
def ensure_utc(tm, str_allowed):
"""
Helper function to convert a time object (Time, datetime, or UTC string
if str_allowed == True) into UTC before passing it to WWT.
str_allowed is True for wwt.set_current_time (core.py) and False for TableLayer's 'time_att' implementation (layers.py).
"""
if tm is None:
        utc_tm = datetime.now(pytz.UTC).isoformat()
elif isinstance(tm, datetime):
if tm.tzinfo is None:
utc_tm = pytz.utc.localize(tm).isoformat()
elif tm.tzinfo == pytz.UTC:
utc_tm = tm.isoformat()
else: # has a non-UTC time zone
utc_tm = tm.astimezone(pytz.UTC).isoformat()
elif isinstance(tm, Time):
utc_tm = tm.to_datetime(pytz.UTC).isoformat()
else:
if str_allowed: # is an ISOT string
dt = Time(tm, format="isot").to_datetime(pytz.UTC)
utc_tm = dt.isoformat()
else:
raise ValueError("Time must be a datetime or astropy.Time object")
return utc_tm
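# --- Hedged usage sketch (added for illustration; not part of pywwt) ---
# A quick check of how ensure_utc() normalizes the accepted input types.
# The timestamps below are arbitrary example values.
if __name__ == "__main__":
    print(ensure_utc(None, True))                                        # current time in UTC
    print(ensure_utc(datetime(2021, 1, 1, 12, 0), True))                 # naive datetime, treated as UTC
    print(ensure_utc(Time("2021-01-01T12:00:00", format="isot"), True))  # astropy Time
    print(ensure_utc("2021-01-01T12:00:00", True))                       # ISOT string (str_allowed=True)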
|
StarcoderdataPython
|
1653775
|
<filename>03_customer_tensorflow_keras_nlp/util/preprocessing.py<gh_stars>10-100
from __future__ import division
# Python Built-Ins:
import gzip
import os
import shutil
import subprocess
import tarfile
import time
from typing import Optional
# External Dependencies:
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
def wait_for_file_stable(path: str, stable_secs: int=60, poll_secs: Optional[int]=None) -> bool:
"""Wait for a file to become stable (not recently modified) & return existence
Returns False if file does not exist. Raises FileNotFoundError if file deleted during polling.
    When the two notebooks are run in parallel, this helps minimize errors caused by
    initiating multiple downloads/extractions/etc on the same file at the same time.
"""
if not poll_secs:
poll_secs = stable_secs / 4
try:
init_stat = os.stat(path)
except FileNotFoundError:
return False
if (time.time() - init_stat.st_mtime) < stable_secs:
print(f"Waiting for file to stabilize... {path}")
while (time.time() - os.stat(path).st_mtime) < stable_secs:
time.sleep(poll_secs)
print("File ready")
return True
def dummy_encode_labels(df,label):
encoder = preprocessing.LabelEncoder()
encoded_y = encoder.fit_transform(df[label].values)
num_classes = len(encoder.classes_)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np.eye(num_classes, dtype="float32")[encoded_y]
return dummy_y, encoder.classes_
def tokenize_and_pad_docs(df, columns, max_length=40):
docs = df[columns].values
# prepare tokenizer
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(docs)
print(f"Vocabulary size: {vocab_size}")
print("Padding docs to max_length={} (truncating {} docs)".format(
max_length,
sum(1 for doc in encoded_docs if len(doc) > max_length),
))
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding="post")
print(f"Number of headlines: {len(padded_docs)}")
return padded_docs, t
def get_word_embeddings(t, folder, lang="en"):
"""Download pre-trained word vectors and construct an embedding matrix for tokenizer `t`
Any tokens in `t` not found in the embedding vectors are mapped to all-zeros.
"""
vecs_url = f"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz"
vecs_gz_filename = vecs_url.rpartition("/")[2]
os.makedirs(folder, exist_ok=True)
vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)
# Tokenizer.num_words is nullable, and there's an OOV token, so:
tokenizer_vocab_size = len(t.word_index) + 1
if wait_for_file_stable(vecs_gz_filepath):
print("Using existing embeddings file")
else:
print("Downloading word vectors...")
subprocess.run([" ".join(["wget", "-NP", folder, vecs_url])], check=True, shell=True)
print("Loading into memory...")
embeddings_index = dict()
with gzip.open(vecs_gz_filepath, "rt") as zipf:
firstline = zipf.readline()
emb_vocab_size, emb_d = firstline.split(" ")
emb_vocab_size = int(emb_vocab_size)
emb_d = int(emb_d)
for line in zipf:
values = line.split()
word = values[0]
# Only load subset of the embeddings recognised by the tokenizer:
if word in t.word_index:
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
print("Loaded {} of {} word vectors for tokenizer vocabulary length {}".format(
len(embeddings_index),
emb_vocab_size,
tokenizer_vocab_size,
))
# create a weight matrix for words in training docs
embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
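# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# One way the helpers above can be wired together: tokenize a text column, build the
# fastText embedding matrix, and wrap it in a frozen Keras Embedding layer. The column
# names, cache folder, and max_length are caller-supplied assumptions.
def build_embedding_layer(df, text_col, label_col, folder="data/embeddings", max_length=40):
    padded_docs, tokenizer = tokenize_and_pad_docs(df, text_col, max_length=max_length)
    dummy_y, classes = dummy_encode_labels(df, label_col)
    embedding_matrix = get_word_embeddings(tokenizer, folder, lang="en")
    embedding_layer = tf.keras.layers.Embedding(
        input_dim=embedding_matrix.shape[0],
        output_dim=embedding_matrix.shape[1],
        weights=[embedding_matrix],
        trainable=False,
    )
    return padded_docs, dummy_y, classes, embedding_layer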
|
StarcoderdataPython
|
1756158
|
<gh_stars>1-10
# flake8: noqa
from .. import conf
from .fields import ImageSpecField, ProcessedImageField
|
StarcoderdataPython
|
4986474
|
<filename>aizynthfinder/context/policy/policies.py
""" Module containing classes that interfaces neural network policies
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from aizynthfinder.utils.loading import load_dynamic_class
from aizynthfinder.utils.exceptions import PolicyException
from aizynthfinder.context.collection import ContextCollection
from aizynthfinder.context.policy.expansion_strategies import (
ExpansionStrategy,
TemplateBasedExpansionStrategy,
)
from aizynthfinder.context.policy.filter_strategies import (
FilterStrategy,
QuickKerasFilter,
)
from aizynthfinder.context.policy.expansion_strategies import (
__name__ as expansion_strategy_module,
)
from aizynthfinder.context.policy.filter_strategies import (
__name__ as filter_strategy_module,
)
if TYPE_CHECKING:
from aizynthfinder.utils.type_utils import Any, Sequence, List, Tuple
from aizynthfinder.context.config import Configuration
from aizynthfinder.chem import TreeMolecule
from aizynthfinder.chem.reaction import RetroReaction
class ExpansionPolicy(ContextCollection):
"""
An abstraction of an expansion policy.
This policy provides actions that can be applied to a molecule
:param config: the configuration of the tree search
"""
_collection_name = "expansion policy"
def __init__(self, config: Configuration) -> None:
super().__init__()
self._config = config
def __call__(
self, molecules: Sequence[TreeMolecule]
) -> Tuple[List[RetroReaction], List[float]]:
return self.get_actions(molecules)
def get_actions(
self, molecules: Sequence[TreeMolecule]
) -> Tuple[List[RetroReaction], List[float]]:
"""
Get all the probable actions of a set of molecules, using the selected policies
:param molecules: the molecules to consider
:return: the actions and the priors of those actions
        :raises: PolicyException: if no expansion policy has been selected
"""
if not self.selection:
raise PolicyException("No expansion policy selected")
all_possible_actions = []
all_priors = []
for name in self.selection:
possible_actions, priors = self[name].get_actions(molecules)
all_possible_actions.extend(possible_actions)
all_priors.extend(priors)
if not self._config.additive_expansion and all_possible_actions:
break
return all_possible_actions, all_priors
def load(self, source: ExpansionStrategy) -> None: # type: ignore
"""
Add a pre-initialized expansion strategy object to the policy
:param source: the item to add
"""
if not isinstance(source, ExpansionStrategy):
raise PolicyException(
"Only objects of classes inherited from ExpansionStrategy can be added"
)
self._items[source.key] = source
def load_from_config(self, **config: Any) -> None:
"""
Load one or more expansion policy from a configuration
The format should be
files:
key:
- path_to_model
- path_to_templates
or
template-based:
key:
- path_to_model
- path_to_templates
or
custom_package.custom_model.CustomClass:
key:
param1: value1
param2: value2
:param config: the configuration
"""
files_spec = config.get("files", config.get("template-based", {}))
for key, policy_spec in files_spec.items():
modelfile, templatefile = policy_spec
strategy = TemplateBasedExpansionStrategy(
key, self._config, source=modelfile, templatefile=templatefile
)
self.load(strategy)
# Load policies specifying a module and class, e.g. package.module.MyStrategyClass
for strategy_spec, strategy_config in config.items():
if strategy_spec in ["files", "template-based"]:
continue
cls = load_dynamic_class(
strategy_spec, expansion_strategy_module, PolicyException
)
for key, policy_spec in strategy_config.items():
obj = cls(key, self._config, **(policy_spec or {}))
self.load(obj)
class FilterPolicy(ContextCollection):
"""
An abstraction of a filter policy.
This policy provides a query on a reaction to determine whether it should be rejected
:param config: the configuration of the tree search
"""
_collection_name = "filter policy"
def __init__(self, config: Configuration) -> None:
super().__init__()
self._config = config
def __call__(self, reaction: RetroReaction) -> None:
return self.apply(reaction)
def apply(self, reaction: RetroReaction) -> None:
"""
        Apply all the selected filters to the reaction. If the reaction
should be rejected a `RejectionException` is raised
:param reaction: the reaction to filter
        :raises: RejectionException if the reaction should be rejected, or PolicyException if no filter policy is selected
"""
if not self.selection:
raise PolicyException("No filter policy selected")
for name in self.selection:
self[name](reaction)
def load(self, source: FilterStrategy) -> None: # type: ignore
"""
Add a pre-initialized filter strategy object to the policy
:param source: the item to add
"""
if not isinstance(source, FilterStrategy):
raise PolicyException(
"Only objects of classes inherited from FilterStrategy can be added"
)
self._items[source.key] = source
def load_from_config(self, **config: Any) -> None:
"""
Load one or more filter policy from a configuration
The format should be
files:
key: path_to_model
or
quick-filter:
key: path_to_model
or
custom_package.custom_model.CustomClass:
key:
param1: value1
param2: value2
:param config: the configuration
"""
files_spec = config.get("files", config.get("quick-filter", {}))
for key, modelfile in files_spec.items():
strategy = QuickKerasFilter(key, self._config, source=modelfile)
self.load(strategy)
# Load policies specifying a module and class, e.g. package.module.MyStrategyClass
for strategy_spec, strategy_config in config.items():
if strategy_spec in ["files", "quick-filter"]:
continue
cls = load_dynamic_class(
strategy_spec, filter_strategy_module, PolicyException
)
for key, policy_spec in strategy_config.items():
obj = cls(key, self._config, **(policy_spec or {}))
self.load(obj)
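# --- Hedged usage sketch (added for illustration; not part of aizynthfinder) ---
# Roughly how an expansion policy could be loaded and queried once a Configuration
# object exists. The key name and file paths are placeholders, and the selection
# step assumes the ContextCollection selection mechanism.
#
# expansion = ExpansionPolicy(config)
# expansion.load_from_config(**{
#     "template-based": {
#         "uspto": ["path/to/model.hdf5", "path/to/templates.csv.gz"],
#     }
# })
# expansion.selection = "uspto"
# actions, priors = expansion.get_actions([tree_molecule])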
|
StarcoderdataPython
|
1692899
|
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing infra build stages."""
from __future__ import print_function
import os
import shutil
from chromite.cbuildbot import commands
from chromite.cbuildbot import constants
from chromite.cbuildbot.stages import generic_stages
from chromite.lib import cipd
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
from chromite.lib import path_util
# Prefix appended to package names in _GO_PACKAGES when building CIPD packages.
_CIPD_PACKAGE_PREFIX = 'chromiumos/infra/'
# Names of packages to build and upload. These are Portage package names (minus
# categories) but are also used to produce CIPD package names.
_GO_PACKAGES = [
'lucifer',
'tast-cmd',
'tast-remote-tests-cros',
]
_CRED_FILE = ('/creds/service_accounts/'
'service-account-chromeos-cipd-uploader.json')
class EmergeInfraGoBinariesStage(generic_stages.BuilderStage):
"""Emerge Chromium OS Go binary packages."""
def PerformStage(self):
"""Build infra Go packages."""
self._EmergePackages()
def _EmergePackages(self):
cmd = ['emerge', '--deep']
cmd.extend(_GO_PACKAGES)
commands.RunBuildScript(self._build_root, cmd,
sudo=True, enter_chroot=True,
extra_env={'FEATURES=test'})
class PackageInfraGoBinariesStage(generic_stages.BuilderStage,
generic_stages.ArchivingStageMixin):
"""Make CIPD packages for Go binaries."""
def PerformStage(self):
"""Build infra Go packages."""
self._PreparePackagesDir()
for package in _GO_PACKAGES:
files = self._GetPortagePackageFiles(package)
if not files:
logging.warning('Skipping package %s with no files', package)
continue
package_path = _GetPackagePath(self.archive_path, package)
with osutils.TempDir() as staging_dir:
logging.info('Staging %s with %d file(s)', package, len(files))
_StageChrootFilesIntoDir(staging_dir, files)
self._BuildCIPDPackage(package_path, package, staging_dir)
logging.info('Uploading %s package artifact', package)
self.UploadArtifact(package_path, archive=False)
def _PreparePackagesDir(self):
self._PrepareArchiveDir()
packages_dir = _GetPackageDir(self.archive_path)
osutils.SafeMakedirs(packages_dir, 0o775)
def _PrepareArchiveDir(self):
# Make sure local archive directory is prepared, if it was not already.
if not os.path.exists(self.archive_path):
self.archive.SetupArchivePath()
def _GetPortagePackageFiles(self, package):
"""Gets paths of files owned by an installed Portage package.
Args:
package: Portage package name without category.
Returns:
A list of paths of files owned by the package.
"""
cmd = ['equery', '--no-color', '--quiet', 'f', '--filter=obj,cmd', package]
result = commands.RunBuildScript(self._build_root, cmd, enter_chroot=True,
redirect_stdout=True)
return result.output.splitlines()
def _BuildCIPDPackage(self, package_path, package, staging_dir):
"""Build CIPD package."""
logging.info('Building CIPD package %s', package)
cipd.BuildPackage(
cipd_path=cipd.GetCIPDFromCache(),
package=(_CIPD_PACKAGE_PREFIX + package),
in_dir=staging_dir,
outfile=package_path,
)
class RegisterInfraGoPackagesStage(generic_stages.BuilderStage,
generic_stages.ArchivingStageMixin):
"""Upload infra Go binaries."""
def PerformStage(self):
"""Upload infra Go binaries."""
if self._run.options.debug:
logging.info('Skipping CIPD package upload as we are in debug mode')
return
for package in _GO_PACKAGES:
self._RegisterPackage(package)
def _RegisterPackage(self, package):
"""Register CIPD package."""
logging.info('Registering CIPD package %s', package)
cipd.RegisterPackage(
cipd_path=cipd.GetCIPDFromCache(),
package_file=_GetPackagePath(self.archive_path, package),
tags={'version': self._VersionString()},
refs=['latest'],
cred_path=_CRED_FILE,
)
def _VersionString(self):
return self._run.attrs.version_info.VersionString()
class TestPuppetSpecsStage(generic_stages.BuilderStage):
"""Run Puppet RSpec tests."""
def PerformStage(self):
"""Build infra Go packages."""
commands.RunBuildScript(
self._build_root,
['bash', '-c',
'cd ../../chromeos-admin/puppet'
' && make -j -O check GEM=gem19'],
enter_chroot=True)
class TestVenvPackagesStage(generic_stages.BuilderStage):
"""Run unittests for infra venv projects."""
def PerformStage(self):
"""Run untitests for infra venv projects."""
commands.RunBuildScript(
self._build_root,
['./bin/test_venv_packages'],
cwd=os.path.join(constants.SOURCE_ROOT, 'chromeos-admin'),
)
commands.RunBuildScript(
self._build_root,
['./bin/test_venv_packages'],
cwd=os.path.join(constants.SOURCE_ROOT, 'infra', 'skylab_inventory'),
)
commands.RunBuildScript(
self._build_root,
['./bin/run_tests'],
cwd=os.path.join(constants.SOURCE_ROOT, 'infra', 'ci_results_archiver'),
)
def _StageChrootFilesIntoDir(target_path, paths):
"""Install chroot files into a staging directory.
Args:
target_path: Path to the staging directory
paths: An iterable of absolute paths inside the chroot
"""
for path in paths:
chroot_path = path_util.FromChrootPath(path)
install_path = os.path.join(target_path, os.path.relpath(path, '/'))
install_parent = os.path.dirname(install_path)
osutils.SafeMakedirs(install_parent, 0o775)
shutil.copyfile(chroot_path, install_path)
shutil.copymode(chroot_path, install_path)
def _GetPackageDir(archive_path):
"""Get package directory."""
return os.path.join(archive_path, 'infra_go_packages')
def _GetPackagePath(archive_path, package):
"""Get package path."""
return os.path.join(_GetPackageDir(archive_path), '%s.cipd' % package)
|
StarcoderdataPython
|
6697087
|
<reponame>rpharoah/42-workshop<filename>fortytwo/s32_autorun_tests.py
"""32: Auto-Run Tests
Get into testing mode by telling PyCharm to automatically
re-run tests as you type.
- Click the auto-test button and click Play
- Make a change in ``test_32`` that causes a failure
- Fix, don't save...still runs
- Configurable delay
Repo: https://github.com/pauleveritt/42-workshop
Playlist: https://www.jetbrains.com/pycharm/guide/playlists/42/
"""
from fortytwo import App
from fortytwo.models import Greeter
def main():
site = App()
with site as container:
greeter = container.get(Greeter)
greeting = greeter('Larry')
return greeting
if __name__ == '__main__':
print(main())
|
StarcoderdataPython
|
50459
|
<filename>indice_pollution/__init__.py
import requests
import csv
from sqlalchemy.orm import joinedload
from indice_pollution.history.models.commune import Commune
from indice_pollution.history.models.indice_atmo import IndiceATMO
from indice_pollution.history.models.episode_pollution import EpisodePollution
from flask import Flask
from flask_manage_webpack import FlaskManageWebpack
from flask_cors import CORS
from flask_migrate import Migrate
from datetime import datetime, timedelta, date
import os
import logging
from indice_pollution.history.models.raep import RAEP
from indice_pollution.history.models.zone import Zone
from .helpers import today
from .extensions import celery
from importlib import import_module
from kombu import Queue
from celery.schedules import crontab
def configure_celery(flask_app):
"""Configure tasks.celery:
* read configuration from flask_app.config and update celery config
* create a task context so tasks can access flask.current_app
Doing so is recommended by flask documentation:
https://flask.palletsprojects.com/en/1.1.x/patterns/celery/
"""
# Settings list:
# https://docs.celeryproject.org/en/stable/userguide/configuration.html
celery_conf = {
key[len('CELERY_'):].lower(): value
for key, value in flask_app.config.items()
if key.startswith('CELERY_')
}
celery.conf.update(celery_conf)
celery.conf.task_queues = (
Queue("default", routing_key='task.#'),
Queue("save_indices", routing_key='save_indices.#'),
)
celery.conf.task_default_exchange = 'tasks'
celery.conf.task_default_exchange_type = 'topic'
celery.conf.task_default_routing_key = 'task.default'
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with flask_app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
@celery.task()
def save_all_indices():
save_all()
@celery.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(
crontab(minute='0', hour='*/1'),
save_all_indices.s(),
queue='save_indices',
routing_key='save_indices.save_all'
)
def create_app(test_config=None):
app = Flask(
__name__,
instance_relative_config=True,
static_url_path=''
)
app.config.from_mapping(
SECRET_KEY=os.getenv('SECRET_KEY', 'dev'),
SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI') or os.getenv('POSTGRESQL_ADDON_URI'),
SQLALCHEMY_TRACK_MODIFICATIONS=False
)
app.config['CELERY_RESULT_BACKEND'] = os.getenv('CELERY_RESULT_BACKEND') or f"db+{app.config['SQLALCHEMY_DATABASE_URI']}"
app.config['CELERY_BROKER_URL'] = os.getenv('CELERY_BROKER_URL') or f"sqla+{app.config['SQLALCHEMY_DATABASE_URI']}"
CORS(app, send_wildcard=True)
manage_webpack = FlaskManageWebpack()
manage_webpack.init_app(app)
from .extensions import db
db.init_app(app)
migrate = Migrate(app, db)
configure_celery(app)
with app.app_context():
import indice_pollution.api
import indice_pollution.web
import indice_pollution.history
return app
def make_resp(r, result, date_=None):
if type(result) == list:
if date_:
result = [v for v in result if v['date'] == str(date_)]
elif hasattr(result, 'dict'):
result = [result.dict()]
else:
result = [result]
return {
"data": result,
"metadata": make_metadata(r)
}
def make_metadata(r):
return {
"region": {
"nom": r.__name__.split(".")[-1],
"website": r.Service.website,
"nom_aasqa": r.Service.nom_aasqa
}
}
def forecast(insee, date_=None, use_make_resp=True):
from .regions.solvers import get_region
date_ = date_ or today()
try:
region = get_region(insee)
except KeyError:
return {
"error": f"No region for {insee}",
"metadata": {}
}, 400
if region.Service.is_active:
indice = IndiceATMO.get(insee=insee)
if use_make_resp:
return make_resp(region, indice, date_)
else:
if indice is not None:
indice.region = region
indice.commune = Commune.get(insee)
return indice
else:
indice = IndiceATMO()
indice.region = region
indice.commune = Commune.get(insee)
indice.error = "Inactive region"
return indice
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def bulk(insee_region_names: dict, date_=None, fetch_episodes=False, fetch_allergenes=False):
from indice_pollution.history.models import IndiceATMO, EpisodePollution
from .regions.solvers import get_region
date_ = date_ or today()
insees = set(insee_region_names.keys())
insees_errors = set()
for insee in insees:
try:
region = get_region(region_name=insee_region_names[insee])
if not region.Service.is_active:
insees_errors.add(insee)
continue
except KeyError:
insees_errors.add(insee)
continue
for insee in insees_errors:
insees.remove(insee)
del insee_region_names[insee]
indices = dict()
episodes = dict()
for chunk in chunks(list(insees), 100):
indices.update(
{i['insee']: IndiceATMO.make_dict(i) for i in IndiceATMO.bulk(date_=date_, insees=chunk)}
)
if fetch_episodes:
episodes.update(
{e['insee']: dict(e) for e in EpisodePollution.bulk(date_=date_, insees=chunk)}
)
to_return = {
insee: {
"forecast": make_resp(
get_region(region_name=insee_region_names[insee]),
indices.get(insee, [])
),
**({
"episode": make_resp(
get_region(region_name=insee_region_names[insee]),
episodes.get(insee, [])
)
} if fetch_episodes else {}
)
}
for insee in insees
}
if fetch_allergenes:
allergenes_par_departement = {
r.zone_id: r
for r in RAEP.get_all()
}
communes = {
c.insee: c
for c in Commune.query.options(
joinedload(Commune.departement)
).populate_existing(
).all()
}
for insee in insees:
c = communes[insee]
if c.departement.zone_id in allergenes_par_departement:
to_return.setdefault(insee, {})
to_return[insee].update({'raep': allergenes_par_departement[c.departement.zone_id].to_dict()})
return to_return
def episodes(insee, date_=None):
from .regions.solvers import get_region
date_ = date_ or today()
if type(date_) == str:
date_ = date.fromisoformat(date_)
region = get_region(insee)
if region.Service.is_active:
result = list(map(lambda e: e.dict(), EpisodePollution.get(insee=insee, date_=date_)))
return make_resp(region, result, date_)
else:
return {
"error": "Inactive region",
"metadata": make_metadata(region)
}, 400
def availability(insee):
from .regions.solvers import get_region
try:
return get_region(insee).Service.is_active
except KeyError:
return False
except AttributeError:
return False
def raep(insee, extended=False):
if insee is None:
return {}
departement = Commune.get(insee).departement
if extended:
pass
else:
data = RAEP.get(zone_id=departement.zone_id)
return {
"departement": {
"nom": departement.nom,
"code": departement.code
},
"data": data.to_dict() if data else None
}
def save_all():
logger = logging.getLogger(__name__)
logger.info('Begin of "save_all" task')
regions = [
'Auvergne-Rhône-Alpes',
'Bourgogne-Franche-Comté',
'Bretagne',
'Centre-Val de Loire',
'Corse',
'Grand Est',
'Guadeloupe',
'Guyane',
'Hauts-de-France',
'Île-de-France',
'Martinique',
'Mayotte',
'Normandie',
'Nouvelle-Aquitaine',
'Occitanie',
'Pays de la Loire',
"Réunion",
"Sud"
]
for region in regions:
logger.info(f'Saving {region} region')
module = import_module(f"indice_pollution.regions.{region}")
if not module.Service.is_active:
continue
logger.info(f'Saving Forecast of {region}')
module.Forecast().save_all()
logger.info(f'Saving Episode of {region}')
module.Episode().save_all()
logger.info(f'Saving of {region} ended')
RAEP.save_all()
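# --- Hedged usage sketch (added for illustration; not part of the package) ---
# Typical interactive use of the public helpers, inside an application context.
# The INSEE code 75056 (Paris) is only an example value.
#
# app = create_app()
# with app.app_context():
#     print(availability("75056"))                  # True if the region's service is active
#     print(forecast("75056", use_make_resp=True))  # air-quality indice for the commune
#     print(raep("75056"))                          # pollen risk (RAEP) for the departement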
|
StarcoderdataPython
|
376732
|
from .cog import BotLog
def setup(bot):
bot.add_cog(BotLog(bot))
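# --- Added note (illustrative; not part of the original extension) ---
# On discord.py 2.x, extension setup hooks must be coroutines, so the equivalent
# file would read:
#
# async def setup(bot):
#     await bot.add_cog(BotLog(bot))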
|
StarcoderdataPython
|
6653805
|
<gh_stars>0
num1 = int(input())
num2 = int(input())
print(num1-num2)
|
StarcoderdataPython
|